consumer-pgmq 1.0.0 → 1.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md
CHANGED
@@ -28,4 +28,191 @@ npm install consumer-pgmq
 yarn add consumer-pgmq
 ```
 
+## Options
+
+- queueName: The name of the queue.
+- visibilityTime: The time in seconds that the message will be invisible to other consumers. Notes:
+  - Your handler must finish within this time, or the message will become visible to other consumers again.
+  - It is also used to abort the handler if it takes too long to finish. For example, if you set visibilityTime to 15 seconds and your handler has not finished after 15 seconds, the handler is aborted and the message becomes visible to other consumers again.
+- consumeType: The type of consume. Can be "read" or "pop".
+  - With the "read" consume type, the consumer reads the message and the message is not deleted from the queue until the callback completes successfully.
+  - With the "pop" consume type, the consumer deletes the message from the queue the moment it reads it.
+- poolSize: The number of consumers that will be created to consume messages. If you use the "read" consume type, this is also the number of messages fetched at the same time.
+- timeMsWaitBeforeNextPolling: The time in milliseconds to wait before the next polling.
+
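For illustration, a minimal options object using the fields above might look like the sketch below; the queue name and numeric values are only example choices (they match the values used in the examples further down), not defaults.

```javascript
// Illustrative values only; the field names come from the option list above.
const options = {
  queueName: "subscriptions",         // queue to consume from
  visibilityTime: 15,                 // seconds a fetched message stays invisible; also the handler's abort window
  consumeType: "read",                // "read" deletes the message only after the handler succeeds; "pop" deletes it immediately
  poolSize: 4,                        // number of consumers (and, with "read", messages fetched per poll)
  timeMsWaitBeforeNextPolling: 1000   // milliseconds to wait between polls
};
```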
+## Events
+
+- finish: Emitted when a message is consumed successfully.
+- abort-error: Emitted when a message is aborted.
+- error: Emitted when an error occurs.
+
+## Examples of how to use
+
+- Consuming messages from Supabase queue
+```javascript
+import { config } from "dotenv"
+config()
+
+import Consumer from '../src/consumer';
+import { createClient, SupabaseClient } from '@supabase/supabase-js';
+import SupabaseQueueDriver from '../src/queueDriver/SupabaseQueueDriver';
+import timersPromises from "node:timers/promises";
+
+const supabase = createClient(
+  // @ts-ignore
+  process.env.SUPABASE_URL,
+  process.env.SUPABASE_ANON_KEY,
+  {
+    db: {
+      schema: 'pgmq_public'
+    }
+  }
+);
+
+const supabaseQueueDriver = new SupabaseQueueDriver(
+  supabase as unknown as SupabaseClient
+)
+
+async function start() {
+  for (let i = 0; i < 200; i++) {
+    await supabase.rpc("send", {
+      queue_name: "subscriptions",
+      message: { "message": `Message triggered at ${Date.now()}` }
+    });
+  }
+  console.log("Total messages sent: ", 200)
+
+  const consumer = new Consumer(
+    {
+      queueName: 'subscriptions',
+      visibilityTime: 15,
+      consumeType: "read",
+      poolSize: 4,
+      timeMsWaitBeforeNextPolling: 1000
+    },
+    async function (message: { [key: string]: any }, signal): Promise<void> {
+      try {
+        console.log(message)
+        const url = "https://jsonplaceholder.typicode.com/todos/1";
+        await timersPromises.setTimeout(100, null, { signal });
+        console.log("Fetching data...");
+        const response = await fetch(url, { signal });
+        const todo = await response.json();
+        console.log("Todo:", todo);
+      } catch (error: any) {
+        if (error.name === "AbortError") {
+          console.log("Operation aborted");
+        } else {
+          console.error("Error:", error);
+        }
+      }
+    },
+    supabaseQueueDriver
+  );
+
+  consumer.on('finish', (message: { [key: string]: any }) => {
+    console.log('Consumed message =>', message);
+  });
+
+  consumer.on("abort-error", (err) => {
+    console.log("Abort error =>", err)
+  })
+
+  consumer.on('error', (err: Error) => {
+    if (err.message.includes("TypeError: fetch failed")) {
+      console.log(err)
+      process.exit(1);
+    }
+    console.error('Error consuming message:', err.message);
+  });
+
+  consumer.start();
+}
+
+start()
+```
+
+- Consuming messages from a PostgreSQL queue
+```javascript
+import { config } from "dotenv"
+config()
+
+import Consumer from '../src/consumer';
+import PostgresQueueDriver from '../src/queueDriver/PostgresQueueDriver';
+import timersPromises from "node:timers/promises";
+import knex from 'knex'
+
+async function start() {
+  const connection = knex({
+    client: 'pg',
+    connection: {
+      host: process.env.POSTGRES_HOST,
+      database: process.env.POSTGRES_DATABASE,
+      password: process.env.POSTGRES_PASSWORD,
+      port: Number(process.env.POSTGRES_PORT),
+      user: process.env.POSTGRES_USER,
+      ssl: false
+    }
+  });
+
+  const postgresQueueDriver = new PostgresQueueDriver(connection)
+
+  const consumer = new Consumer(
+    {
+      queueName: 'subscriptions',
+      visibilityTime: 15,
+      consumeType: "read",
+      poolSize: 4,
+      timeMsWaitBeforeNextPolling: 1000
+    },
+    async function (message: { [key: string]: any }, signal): Promise<void> {
+      try {
+        console.log(message)
+        const url = "https://jsonplaceholder.typicode.com/todos/1";
+        await timersPromises.setTimeout(100, null, { signal });
+        console.log("Fetching data...");
+        const response = await fetch(url, { signal });
+        const todo = await response.json();
+        console.log("Todo:", todo);
+      } catch (error: any) {
+        if (error.name === "AbortError") {
+          console.log("Operation aborted");
+        } else {
+          console.error("Error:", error);
+        }
+      }
+    },
+    postgresQueueDriver
+  );
+
+  consumer.on('finish', (message: { [key: string]: any }) => {
+    console.log('Consumed message =>', message);
+  });
+
+  consumer.on("abort-error", (err) => {
+    console.log("Abort error =>", err)
+  })
+
+  consumer.on('error', (err: Error) => {
+    if (err.message.includes("TypeError: fetch failed")) {
+      console.log(err)
+      process.exit(1);
+    }
+    console.error('Error consuming message:', err.message);
+  });
+
+  consumer.start();
+}
+
+start()
+```
+
+
+
 
@@ -15,11 +15,11 @@ async function start() {
       password: process.env.POSTGRES_PASSWORD,
       port: Number(process.env.POSTGRES_PORT),
       user: process.env.POSTGRES_USER,
-      ssl: false
+      ssl: false,
     }
   });
 
-  const postgresQueueDriver = new PostgresQueueDriver(connection)
+  const postgresQueueDriver = new PostgresQueueDriver(connection, "pgmq")
 
   const consumer = new Consumer(
     {
package/package.json
CHANGED
@@ -3,8 +3,10 @@ import { Message, QueueDriver } from "../type";
 
 class PostgresQueueDriver implements QueueDriver {
 
+
   constructor(
-    private connection: Knex
+    private connection: Knex,
+    private schema: string = "public"
   ) { }
 
   /**
@@ -17,7 +19,7 @@ class PostgresQueueDriver implements QueueDriver {
   async get(queueName: string, visibilityTime: number, totalMessages: number): Promise<{ data: Message[]; error: any; }> {
     try {
       const register = await this.connection.raw(`
-        SELECT * FROM
+        SELECT * FROM ${this.schema}.read(
           queue_name => ?,
           vt => ?,
           qty => ?
@@ -44,7 +46,7 @@ class PostgresQueueDriver implements QueueDriver {
   async pop(queueName: string): Promise<{ data: Message[]; error: any; }> {
     try {
       const register = await this.connection.raw(`
-        SELECT * FROM
+        SELECT * FROM ${this.schema}.pop(
           queue_name => ?
         );
       `, [queueName]
@@ -70,7 +72,7 @@ class PostgresQueueDriver implements QueueDriver {
   async delete(queueName: string, messageID: number): Promise<{ error: any; }> {
     try {
       await this.connection.raw(`
-        SELECT * FROM
+        SELECT * FROM ${this.schema}.delete(
           queue_name => ?,
           msg_id => ?
         );
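For reference, a sketch of the query the patched get() ends up issuing when the driver is constructed with the "pgmq" schema used in the README example. The wrapper function and binding values are illustrative only; the pgmq function parameters (queue_name, vt, qty) come from the hunk above.

```javascript
// Sketch only: mirrors the raw query built by get() above, assuming a Knex
// instance named `connection` and the "pgmq" schema from the README example.
async function readBatch(connection) {
  const result = await connection.raw(`
    SELECT * FROM pgmq.read(
      queue_name => ?,
      vt => ?,
      qty => ?
    );
  `, ["subscriptions", 15, 4]); // queue name, visibility time in seconds, batch size
  return result.rows; // with the pg client, raw() resolves to a result object carrying a rows array
}
```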