consumer-pgmq 2.0.1 → 3.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -11,6 +11,7 @@ This project is a consumer of Supabase/Postgresql queue(using pgmq extension) to
11
11
  - Pop consume type is when the consumer gets the message and the message is deleted from queue.
12
12
  - Support for both Supabase and Postgresql
13
13
  - Support for both visibility time and pool size
14
+ - Support to control the limit of consumers running at once. PS: this focuses on the Postgres queue driver and is helpful when you have a weak database that does not allow many connections.
14
15
 
15
16
  ## Installation
16
17
 
@@ -40,7 +41,19 @@ yarn add consumer-pgmq
40
41
  - poolSize: The number of consumers. PS: this is the number of consumers that will be created to consume the messages and
41
42
  if you use read consume type, the pool size is the number of messages will get at the same time.
42
43
  - timeMsWaitBeforeNextPolling: The time in milliseconds to wait before the next polling
43
- - enabledPolling: The enabled polling. PS: if true, the consumer will poll the message, if false, the consumer will consume the message one time and stop. PS: is required to the versions more than 1.0.5
44
+ - enabledPolling: The enabled polling. PS: if true, the consumer will keep polling for messages; if false, the consumer will consume messages once and stop. PS: it is required for versions above 1.0.5.
45
+ - queueNameDlq: The name of the dead letter queue. PS: recommended to set the same name of the queue, but suffix with '_dlq'. For example: **messages_dlq**
46
+ - totalRetriesBeforeSendToDlq: The total number of retries before sending to the dlq. For example: if you set totalRetriesBeforeSendToDlq to 2, the message will be sent to the dlq if the handler fails 2 times, so on the third attempt the message will be sent to the dlq and removed from the main queue to avoid infinite retries.
47
+ - enableControlConsumer: Enables consumer control. PS: if true, you can control the number of consumers running at once, which is useful when your Postgres database is weak and having a lot of connections can be a problem.
48
+ - Warning:
49
+ - You need to execute SQL to create the table 'workers'; you can use the **schema.sql** file for it.
50
+ - If you need to increase the number of workers, you can insert new registers into the table 'workers'.
51
+ - Each register on the table workers will represent a process running as a consumer(worker). PS: the status column can be 'idle' or 'working'. The **idle** status means the worker is waiting to be allocated by a consumer and the **working** status means it was allocated by a consumer.
52
+
53
+ ## Extra points to know when use the dlq feature
54
+ - The dead letter queue does not work if you set the consumerType option to 'pop', because pop gets the message and removes it from the queue at the same time, so if processing fails you lose the message.
55
+ - Recommendation: do not set a low value for the option 'visibilityTime' if you are using the dead letter queue feature. For example: setting visibilityTime lower than 30 seconds is risky, because if the message wasn't deleted and becomes available again, the consumer application can consume the same message again.
56
+ - On the Postgresql queue driver, when you enable the option 'isCustomQueueImplementation', it means you created a custom table to work as the queue. PS: in this case you need to use the **schema.sql** file to create a queue table; if you don't want jobs and jobs_dlq as table names you can change them in the **schema.sql** file.
44
57
 
45
58
  ## Events
46
59
 
@@ -89,11 +102,13 @@ async function start() {
89
102
  const consumer = new Consumer(
90
103
  {
91
104
  queueName: 'subscriptions',
92
- visibilityTime: 15,
105
+ visibilityTime: 30,
93
106
  consumeType: "read",
94
- poolSize: 4,
107
+ poolSize: 8,
95
108
  timeMsWaitBeforeNextPolling: 1000,
96
- enabledPolling: false
109
+ enabledPolling: true,
110
+ queueNameDlq: "subscriptions_dlq",
111
+ totalRetriesBeforeSendToDlq: 2
97
112
  },
98
113
  async function (message: { [key: string]: any }, signal): Promise<void> {
99
114
  try {
@@ -143,33 +158,39 @@ start()
143
158
  import { config } from "dotenv"
144
159
  config()
145
160
 
146
- import { Consumer, PostgresQueueDriver } from "consumer-pgmq"
147
- import timersPromises from "node:timers/promises";
148
- import knex from 'knex'
161
+ import Consumer from '../src/consumer';
162
+ import PostgresQueueDriver from '../src/queueDriver/PostgresQueueDriver';
163
+
164
+ import { Client } from 'pg'
149
165
 
150
166
  async function start() {
151
- const connection = knex({
152
- client: 'pg',
153
- connection: {
154
- host: process.env.POSTGRES_HOST,
155
- database: process.env.POSTGRES_DATABASE,
156
- password: process.env.POSTGRES_PASSWORD,
157
- port: Number(process.env.POSTGRES_PORT),
158
- user: process.env.POSTGRES_USER,
159
- ssl: false
160
- }
161
- });
162
167
 
163
- const postgresQueueDriver = new PostgresQueueDriver(connection, "schema_name_here")
168
+ const pgClient = new Client({
169
+ host: process.env.POSTGRES_HOST,
170
+ database: process.env.POSTGRES_DATABASE,
171
+ password: process.env.POSTGRES_PASSWORD,
172
+ port: Number(process.env.POSTGRES_PORT),
173
+ user: process.env.POSTGRES_USER,
174
+ ssl: false,
175
+ })
176
+
177
+ await pgClient.connect()
178
+
179
+
180
+ const postgresQueueDriver = new PostgresQueueDriver(
181
+ pgClient, "pgmq"
182
+ )
164
183
 
165
184
  const consumer = new Consumer(
166
185
  {
167
186
  queueName: 'subscriptions',
168
- visibilityTime: 15,
187
+ visibilityTime: 30,
169
188
  consumeType: "read",
170
- poolSize: 4,
189
+ poolSize: 8,
171
190
  timeMsWaitBeforeNextPolling: 1000,
172
- enabledPolling: false
191
+ enabledPolling: true,
192
+ queueNameDlq: "subscriptions_dlq",
193
+ totalRetriesBeforeSendToDlq: 2
173
194
  },
174
195
  async function (message: { [key: string]: any }, signal): Promise<void> {
175
196
  try {
@@ -214,6 +235,82 @@ async function start() {
214
235
  start()
215
236
  ```
216
237
 
238
+ - Consuming messages from the Postgresql queue with the option enableControlConsumer enabled and isCustomQueueImplementation enabled on the Postgres queue driver:
239
+
240
+ ```js
241
+ import { config } from "dotenv"
242
+ config()
243
+
244
+ import Consumer from '../src/consumer';
245
+ import PostgresQueueDriver from '../src/queueDriver/PostgresQueueDriver';
246
+
247
+ import { Client } from 'pg'
248
+
249
+ async function start() {
250
+
251
+ const pgClient = new Client({
252
+ host: process.env.POSTGRES_HOST,
253
+ database: process.env.POSTGRES_DATABASE,
254
+ password: process.env.POSTGRES_PASSWORD,
255
+ port: Number(process.env.POSTGRES_PORT),
256
+ user: process.env.POSTGRES_USER,
257
+ ssl: true,
258
+ })
259
+
260
+ await pgClient.connect()
261
+
262
+
263
+ const postgresQueueDriver = new PostgresQueueDriver(
264
+ pgClient, "public", true
265
+ )
266
+
267
+ const consumer = new Consumer(
268
+ {
269
+ queueName: 'jobs',
270
+ visibilityTime: 30,
271
+ consumeType: "read",
272
+ poolSize: 8,
273
+ timeMsWaitBeforeNextPolling: 1000,
274
+ enabledPolling: true,
275
+ queueNameDlq: "jobs_dlq",
276
+ totalRetriesBeforeSendToDlq: 2,
277
+ enableControlConsumer: true
278
+ },
279
+ async function (message: { [key: string]: any }, signal): Promise<void> {
280
+ console.log(message)
281
+ },
282
+ postgresQueueDriver
283
+ );
284
+
285
+ for (let index = 0; index < 100; index++) {
286
+ await postgresQueueDriver.send("jobs", {
287
+ message: `Message ${index}`,
288
+ id: index
289
+ })
290
+ }
291
+
292
+ consumer.on("send-to-dlq", (message: { [key: string]: any }) => {
293
+ console.log("Send to DLQ =>", message)
294
+ })
295
+
296
+ consumer.on('error', (err: Error) => {
297
+ console.error('Error consuming message:', err.message);
298
+ });
299
+
300
+ await consumer.start();
301
+
302
+ process.on("SIGINT", async () => {
303
+ await consumer.freeConsumer()
304
+ await pgClient.end()
305
+ process.exit(0)
306
+ })
307
+
308
+ }
309
+
310
+ start()
311
+ ```
312
+
313
+
217
314
 
218
315
 
219
316
 
package/dist/consumer.js CHANGED
@@ -8,9 +8,18 @@ const READ = "read";
8
8
  class Consumer extends events_1.EventEmitter {
9
9
  constructor(options, callback, client) {
10
10
  super();
11
+ this.setTimeoutId = null;
12
+ this.id = null;
11
13
  this.options = options;
12
14
  this.callback = callback;
13
15
  this.client = client;
16
+ this.valideOptions();
17
+ this.setTimeoutId = null;
18
+ }
19
+ valideOptions() {
20
+ if (this.options.queueNameDlq && !this.options.totalRetriesBeforeSendToDlq) {
21
+ throw new Error("The option totalRetriesBeforeSendToDlq is required when queueNameDlq is set");
22
+ }
14
23
  }
15
24
  /**
16
25
  * Get the message
@@ -51,6 +60,9 @@ class Consumer extends events_1.EventEmitter {
51
60
  * @private
52
61
  */
53
62
  async pollMessage() {
63
+ if (this.setTimeoutId) {
64
+ clearTimeout(this.setTimeoutId);
65
+ }
54
66
  let promises = [];
55
67
  try {
56
68
  const { data, error } = await this.getMessage();
@@ -58,19 +70,35 @@ class Consumer extends events_1.EventEmitter {
58
70
  throw error;
59
71
  }
60
72
  if (data.length === 0 && this.options.enabledPolling) {
61
- setTimeout(() => this.pollMessage(), (this.options.timeMsWaitBeforeNextPolling || 1000) * 10);
73
+ this.setTimeoutId = setTimeout(() => this.pollMessage(), (this.options.timeMsWaitBeforeNextPolling || 1000) * 10);
62
74
  return;
63
75
  }
64
76
  const controller = new AbortController();
65
77
  const signal = controller.signal;
66
- for (let i = 0; i < (data.length || 1); i++) {
78
+ for (let i = 0; i < data.length; i++) {
79
+ const hasSendToDlq = data[i] &&
80
+ this.options.queueNameDlq &&
81
+ this.options.totalRetriesBeforeSendToDlq &&
82
+ data[i].read_ct > this.options.totalRetriesBeforeSendToDlq;
83
+ if (hasSendToDlq) {
84
+ promises.push(this.client.send(
85
+ // @ts-ignore
86
+ this.options.queueNameDlq, data[i].message, signal).then(async () => {
87
+ await this.deleteMessage(data[i], signal);
88
+ this.emit('send-to-dlq', data[i]);
89
+ }));
90
+ continue;
91
+ }
67
92
  promises.push(this.callback(data[i].message, signal).then(async () => {
68
93
  await this.deleteMessage(data[i], signal);
69
94
  this.emit('finish', data[i]);
70
95
  }));
71
96
  }
72
- setTimeout(() => controller.abort(), (this.options.visibilityTime || 1) * 1000);
73
- await Promise.allSettled(promises);
97
+ const timeoutId = setTimeout(() => controller.abort(), (this.options.visibilityTime || 1) * 1000);
98
+ if (promises.length > 0) {
99
+ await Promise.allSettled(promises);
100
+ }
101
+ clearTimeout(timeoutId);
74
102
  promises = [];
75
103
  }
76
104
  catch (err) {
@@ -85,7 +113,7 @@ class Consumer extends events_1.EventEmitter {
85
113
  if (!this.options.enabledPolling) {
86
114
  return;
87
115
  }
88
- setTimeout(() => this.pollMessage(), this.options.timeMsWaitBeforeNextPolling || 1000);
116
+ this.setTimeoutId = setTimeout(() => this.pollMessage(), this.options.timeMsWaitBeforeNextPolling || 1000);
89
117
  }
90
118
  }
91
119
  /**
@@ -94,7 +122,17 @@ class Consumer extends events_1.EventEmitter {
94
122
  * @public
95
123
  */
96
124
  async start() {
125
+ if (this.options.enableControlConsumer) {
126
+ const { id } = await this.client.allocateConsumer();
127
+ this.id = id;
128
+ }
97
129
  await this.pollMessage();
98
130
  }
131
+ async freeConsumer() {
132
+ if (this.id) {
133
+ console.log("passed on here");
134
+ await this.client.freeConsumer(this.id);
135
+ }
136
+ }
99
137
  }
100
138
  exports.default = Consumer;
@@ -1,9 +1,123 @@
1
1
  "use strict";
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
3
  class PostgresQueueDriver {
4
- constructor(connection, schema = "public") {
4
+ constructor(connection, schema = "public", isCustomQueueImplementation = true) {
5
5
  this.connection = connection;
6
6
  this.schema = schema;
7
+ this.isCustomQueueImplementation = isCustomQueueImplementation;
8
+ }
9
+ /**
10
+ * Allocate the consumer
11
+ * @returns Promise<{ id: string; }>
12
+ */
13
+ async allocateConsumer() {
14
+ const register = await this.connection.query(`
15
+ WITH next_workers AS (
16
+ SELECT id
17
+ FROM workers as w
18
+ WHERE
19
+ status = 'idle'
20
+ LIMIT 1
21
+ FOR UPDATE SKIP LOCKED
22
+ )
23
+ UPDATE workers
24
+ SET status = 'working',
25
+ updated_at = now()
26
+ FROM next_workers
27
+ WHERE workers.id = next_workers.id
28
+ RETURNING workers.*;
29
+ `);
30
+ if (register.rows.length == 0) {
31
+ throw new Error("No available consumer(worker) to allocate");
32
+ }
33
+ return { id: register.rows[0].id };
34
+ }
35
+ async freeConsumer(id) {
36
+ try {
37
+ await this.connection.query(`UPDATE workers set status = 'idle' WHERE id = $1`, [id]);
38
+ }
39
+ catch (error) {
40
+ throw error;
41
+ }
42
+ }
43
+ async sendCustomQueue(queueName, message) {
44
+ try {
45
+ const query = `INSERT INTO ${this.schema}.${queueName}(payload) VALUES($1)`;
46
+ await this.connection.query(query, [JSON.stringify(message)]);
47
+ return { error: null };
48
+ }
49
+ catch (error) {
50
+ return { error };
51
+ }
52
+ }
53
+ /**
54
+ * Send the message
55
+ * @param queueName The name of the queue
56
+ * @param message The message
57
+ * @returns Promise<{ error: any }>
58
+ */
59
+ async send(queueName, message) {
60
+ if (this.isCustomQueueImplementation) {
61
+ return this.sendCustomQueue(queueName, message);
62
+ }
63
+ try {
64
+ await this.connection.query(`
65
+ SELECT * FROM ${this.schema}.send(
66
+ queue_name => $1,
67
+ msg => $2,
68
+ delay => $3
69
+ );
70
+ `, [queueName, message, 1]);
71
+ return { error: null };
72
+ }
73
+ catch (error) {
74
+ return { error };
75
+ }
76
+ }
77
+ async getCustomQueue(queueName, visibilityTime, totalMessages) {
78
+ try {
79
+ const register = await this.connection.query(`
80
+ WITH next_job AS (
81
+ SELECT id
82
+ FROM ${queueName} as jobs
83
+ WHERE
84
+ (
85
+ status = 'pending'
86
+ OR (status = 'in_progress' AND visible_at <= now())
87
+ )
88
+ ORDER BY created_at
89
+ LIMIT $1
90
+ FOR UPDATE SKIP LOCKED
91
+ )
92
+ UPDATE jobs
93
+ SET status = 'in_progress',
94
+ updated_at = now(),
95
+ visible_at = now() + interval '${visibilityTime} seconds',
96
+ retry_count = retry_count + 1
97
+ FROM next_job
98
+ WHERE jobs.id = next_job.id
99
+ RETURNING jobs.*;
100
+ `, [
101
+ totalMessages,
102
+ ]);
103
+ if (!register.rows) {
104
+ return { data: [], error: null };
105
+ }
106
+ const items = [];
107
+ for (const row of register.rows) {
108
+ items.push({
109
+ msg_id: row.id,
110
+ read_ct: row.retry_count,
111
+ enqueued_at: row.created_at,
112
+ vt: row.visible_at,
113
+ message: row.payload, // Assuming the message content is stored in a column named 'payload' or '
114
+ });
115
+ }
116
+ return { data: items, error: null };
117
+ }
118
+ catch (error) {
119
+ return { data: [], error };
120
+ }
7
121
  }
8
122
  /**
9
123
  * Get the message
@@ -13,12 +127,15 @@ class PostgresQueueDriver {
13
127
  * @returns Promise<{ data: Message[], error: any }>
14
128
  */
15
129
  async get(queueName, visibilityTime, totalMessages) {
130
+ if (this.isCustomQueueImplementation) {
131
+ return this.getCustomQueue(queueName, visibilityTime, totalMessages);
132
+ }
16
133
  try {
17
- const register = await this.connection.raw(`
134
+ const register = await this.connection.query(`
18
135
  SELECT * FROM ${this.schema}.read(
19
- queue_name => ?,
20
- vt => ?,
21
- qty => ?
136
+ queue_name => $1,
137
+ vt => $2,
138
+ qty => $3
22
139
  );
23
140
  `, [queueName, visibilityTime, totalMessages]);
24
141
  if (!register.rows) {
@@ -36,11 +153,18 @@ class PostgresQueueDriver {
36
153
  * @returns Promise<{ data: Message[], error: any }>
37
154
  */
38
155
  async pop(queueName) {
156
+ if (this.isCustomQueueImplementation) {
157
+ const result = await this.getCustomQueue(queueName, 30, 1);
158
+ if (result.data && result.data[0]) {
159
+ await this.delete(queueName, result.data[0].msg_id);
160
+ }
161
+ return result;
162
+ }
39
163
  try {
40
- const register = await this.connection.raw(`
164
+ const register = await this.connection.query(`
41
165
  SELECT * FROM ${this.schema}.pop(
42
- queue_name => ?
43
- );
166
+ queue_name => $1
167
+ )
44
168
  `, [queueName]);
45
169
  if (!register.rows) {
46
170
  return { data: [], error: null };
@@ -51,6 +175,12 @@ class PostgresQueueDriver {
51
175
  return { data: [], error };
52
176
  }
53
177
  }
178
+ async deleteCustomQueue(queueName, messageID) {
179
+ await this.connection.query(`
180
+ DELETE FROM ${queueName}
181
+ WHERE id = $1;`, [messageID]);
182
+ return { error: null };
183
+ }
54
184
  /**
55
185
  * Delete the message
56
186
  * @param queueName The name of the queue
@@ -59,10 +189,13 @@ class PostgresQueueDriver {
59
189
  */
60
190
  async delete(queueName, messageID) {
61
191
  try {
62
- await this.connection.raw(`
192
+ if (this.isCustomQueueImplementation) {
193
+ return this.deleteCustomQueue(queueName, messageID);
194
+ }
195
+ await this.connection.query(`
63
196
  SELECT * FROM ${this.schema}.delete(
64
- queue_name => ?,
65
- msg_id => ?
197
+ queue_name => $1,
198
+ msg_id => $2
66
199
  );
67
200
  `, [queueName, messageID]);
68
201
  return { error: null };
@@ -4,6 +4,19 @@ class SupabaseQueueDriver {
4
4
  constructor(supabase) {
5
5
  this.supabase = supabase;
6
6
  }
7
+ /**
8
+ * Send the message
9
+ * @param queueName The name of the queue
10
+ * @param message The message
11
+ * @returns Promise<{ error: any }>
12
+ */
13
+ async send(queueName, message) {
14
+ const { error } = await this.supabase.rpc("send", {
15
+ queue_name: queueName,
16
+ message: message
17
+ });
18
+ return { error };
19
+ }
7
20
  /**
8
21
  * Get the message
9
22
  * @param queueName The name of the queue
@@ -43,5 +56,11 @@ class SupabaseQueueDriver {
43
56
  });
44
57
  return { error };
45
58
  }
59
+ allocateConsumer() {
60
+ throw new Error("method logic no implemented");
61
+ }
62
+ freeConsumer(id) {
63
+ throw new Error("method logic no implemented");
64
+ }
46
65
  }
47
66
  exports.default = SupabaseQueueDriver;
@@ -3,70 +3,69 @@ config()
3
3
 
4
4
  import Consumer from '../src/consumer';
5
5
  import PostgresQueueDriver from '../src/queueDriver/PostgresQueueDriver';
6
- import timersPromises from "node:timers/promises";
7
- import knex from 'knex'
6
+
7
+ import { Client } from 'pg'
8
8
 
9
9
  async function start() {
10
- const connection = knex({
11
- client: 'pg',
12
- connection: {
13
- host: process.env.POSTGRES_HOST,
14
- database: process.env.POSTGRES_DATABASE,
15
- password: process.env.POSTGRES_PASSWORD,
16
- port: Number(process.env.POSTGRES_PORT),
17
- user: process.env.POSTGRES_USER,
18
- ssl: false,
19
- }
20
- });
21
10
 
22
- const postgresQueueDriver = new PostgresQueueDriver(connection, "pgmq")
11
+ const pgClient = new Client({
12
+ host: process.env.POSTGRES_HOST,
13
+ database: process.env.POSTGRES_DATABASE,
14
+ password: process.env.POSTGRES_PASSWORD,
15
+ port: Number(process.env.POSTGRES_PORT),
16
+ user: process.env.POSTGRES_USER,
17
+ ssl: true,
18
+ })
19
+
20
+ await pgClient.connect()
21
+
22
+
23
+ const postgresQueueDriver = new PostgresQueueDriver(
24
+ pgClient, "public", true
25
+ )
23
26
 
24
27
  const consumer = new Consumer(
25
28
  {
26
- queueName: 'subscriptions',
27
- visibilityTime: 15,
29
+ queueName: 'jobs',
30
+ visibilityTime: 30,
28
31
  consumeType: "read",
29
- poolSize: 4,
32
+ poolSize: 8,
30
33
  timeMsWaitBeforeNextPolling: 1000,
31
- enabledPolling: true
34
+ enabledPolling: true,
35
+ queueNameDlq: "jobs_dlq",
36
+ totalRetriesBeforeSendToDlq: 2,
37
+ enableControlConsumer: true
32
38
  },
33
39
  async function (message: { [key: string]: any }, signal): Promise<void> {
34
- try {
35
- console.log(message)
36
- // const url = "https://jsonplaceholder.typicode.com/todos/1";
37
- // await timersPromises.setTimeout(100, null, { signal });
38
- // console.log("Fetching data...");
39
- // const response = await fetch(url, { signal });
40
- // const todo = await response.json();
41
- // console.log("Todo:", todo);
42
- } catch (error: any) {
43
- if (error.name === "AbortError") {
44
- console.log("Operation aborted");
45
- } else {
46
- console.error("Error:", error);
47
- }
48
- }
40
+ console.log(message)
49
41
  },
50
42
  postgresQueueDriver
51
43
  );
52
44
 
53
- // consumer.on('finish', (message: { [key: string]: any }) => {
54
- // console.log('Consumed message =>', message);
55
- // });
45
+ for (let index = 0; index < 100; index++) {
46
+ await postgresQueueDriver.send("jobs", {
47
+ message: `Message ${index}`,
48
+ id: index
49
+ })
50
+ }
51
+
52
+ console.log("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
56
53
 
57
- consumer.on("abort-error", (err) => {
58
- console.log("Abort error =>", err)
54
+ consumer.on("send-to-dlq", (message: { [key: string]: any }) => {
55
+ console.log("Send to DLQ =>", message)
59
56
  })
60
57
 
61
58
  consumer.on('error', (err: Error) => {
62
- if (err.message.includes("TypeError: fetch failed")) {
63
- console.log(err)
64
- process.exit(1);
65
- }
66
59
  console.error('Error consuming message:', err.message);
67
60
  });
68
61
 
69
- consumer.start();
62
+ await consumer.start();
63
+
64
+ process.on("SIGINT", async () => {
65
+ await consumer.freeConsumer()
66
+ await pgClient.end()
67
+ process.exit(0)
68
+ })
70
69
 
71
70
  }
72
71
 
@@ -21,33 +21,30 @@ const supabaseQueueDriver = new SupabaseQueueDriver(
21
21
  supabase as unknown as SupabaseClient
22
22
  )
23
23
 
24
-
25
- import timersPromises from "node:timers/promises";
26
-
27
24
  async function start() {
28
- // for (let i = 0; i < 200; i++) {
29
- // await supabase.rpc("send", {
30
- // queue_name: "subscriptions",
31
- // message: { "message": `Message triggered at ${Date.now()}` }
32
- // });
33
- // }
34
- // console.log("Total messages sent: ", 200)
25
+ for (let i = 0; i < 50; i++) {
26
+ await supabase.rpc("send", {
27
+ queue_name: "subscriptions",
28
+ message: { "message": `Message triggered at ${Date.now()}` }
29
+ });
30
+ }
31
+ console.log("Total messages sent: ", 50)
35
32
 
36
33
  const consumer = new Consumer(
37
34
  {
38
35
  queueName: 'subscriptions',
39
- visibilityTime: 15,
36
+ visibilityTime: 30,
40
37
  consumeType: "read",
41
38
  poolSize: 8,
42
39
  timeMsWaitBeforeNextPolling: 1000,
43
- enabledPolling: false
40
+ enabledPolling: true,
41
+ queueNameDlq: "subscriptions_dlq",
42
+ totalRetriesBeforeSendToDlq: 2
44
43
  },
45
44
  async function (message: { [key: string]: any }, signal): Promise<void> {
46
45
  try {
47
- if (message.error) {
48
- throw new Error("Error in message")
49
- }
50
46
  console.log(message)
47
+ throw new Error("Error in message")
51
48
  } catch (error: any) {
52
49
  throw error
53
50
  }
@@ -59,6 +56,9 @@ async function start() {
59
56
  // console.log('Consumed message =>', message);
60
57
  // });
61
58
 
59
+ consumer.on("send-to-dlq", (message: { [key: string]: any }) => {
60
+ console.log("Send to DLQ =>", message)
61
+ })
62
62
  consumer.on("abort-error", (err) => {
63
63
  console.log("Abort error =>", err)
64
64
  })