@haathie/pgmb 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/lib/abortable-async-iterator.d.ts +14 -0
- package/lib/abortable-async-iterator.js +86 -0
- package/lib/batcher.d.ts +12 -0
- package/lib/batcher.js +71 -0
- package/lib/client.d.ts +73 -0
- package/lib/client.js +432 -0
- package/lib/consts.d.ts +1 -0
- package/lib/consts.js +4 -0
- package/lib/index.d.ts +5 -0
- package/lib/index.js +19 -0
- package/lib/queries.d.ts +453 -0
- package/lib/queries.js +235 -0
- package/lib/query-types.d.ts +17 -0
- package/lib/query-types.js +2 -0
- package/lib/retry-handler.d.ts +11 -0
- package/lib/retry-handler.js +93 -0
- package/lib/sse.d.ts +4 -0
- package/lib/sse.js +137 -0
- package/lib/types.d.ts +202 -0
- package/lib/types.js +2 -0
- package/lib/utils.d.ts +15 -0
- package/lib/utils.js +52 -0
- package/lib/webhook-handler.d.ts +6 -0
- package/lib/webhook-handler.js +68 -0
- package/package.json +52 -0
- package/readme.md +493 -0
- package/sql/pgmb-0.1.12-0.2.0.sql +1018 -0
- package/sql/pgmb-0.1.12.sql +612 -0
- package/sql/pgmb-0.1.5-0.1.6.sql +256 -0
- package/sql/pgmb-0.1.6-0.1.12.sql +95 -0
- package/sql/pgmb.sql +1030 -0
- package/sql/queries.sql +154 -0
@@ -0,0 +1,612 @@
CREATE SCHEMA pgmb;

-- type to create a message that's sent to a queue
CREATE TYPE pgmb.enqueue_msg AS (
  message BYTEA, headers JSONB, consume_at TIMESTAMPTZ
);
-- type to create a message that's published to an exchange
-- This'll be used to publish messages to exchanges
CREATE TYPE pgmb.publish_msg AS (
  exchange VARCHAR(64), message BYTEA, headers JSONB, consume_at TIMESTAMPTZ
);
-- type to store an existing message record
CREATE TYPE pgmb.msg_record AS (
  id VARCHAR(22), message BYTEA, headers JSONB
);
-- type to store the result of a queue's metrics
CREATE TYPE pgmb.metrics_result AS (
  queue_name VARCHAR(64),
  total_length int,
  consumable_length int,
  newest_msg_age interval,
  oldest_msg_age interval
);

CREATE TYPE pgmb.queue_ack_setting AS ENUM ('archive', 'delete');
CREATE TYPE pgmb.queue_type AS ENUM ('logged', 'unlogged');

-- table for exchanges
CREATE TABLE pgmb.exchanges (
  name VARCHAR(64) PRIMARY KEY,
  queues VARCHAR(64)[] NOT NULL DEFAULT '{}',
  created_at TIMESTAMPTZ DEFAULT NOW()
);

-- fns to create/delete exchanges, and to add/remove queues bound to them
CREATE OR REPLACE FUNCTION pgmb.assert_exchange(nm VARCHAR(64))
RETURNS VOID AS $$
BEGIN
  INSERT INTO pgmb.exchanges (name) (VALUES (nm))
  ON CONFLICT (name) DO NOTHING;
END;
$$ LANGUAGE plpgsql;

-- fn to delete an exchange
CREATE OR REPLACE FUNCTION pgmb.delete_exchange(nm VARCHAR(64))
RETURNS VOID AS $$
BEGIN
  DELETE FROM pgmb.exchanges WHERE name = nm;
END;
$$ LANGUAGE plpgsql;

-- fn to bind a queue to an exchange
CREATE OR REPLACE FUNCTION pgmb.bind_queue(
  queue_name VARCHAR(64), exchange VARCHAR(64)
)
RETURNS VOID AS $$
BEGIN
  UPDATE pgmb.exchanges
  SET queues = array_append(queues, queue_name)
  WHERE name = exchange AND NOT queue_name = ANY(queues);
END;
$$ LANGUAGE plpgsql;

CREATE OR REPLACE FUNCTION pgmb.unbind_queue(
  queue_name VARCHAR(64), exchange VARCHAR(64)
)
RETURNS VOID AS $$
BEGIN
  UPDATE pgmb.exchanges
  SET queues = array_remove(queues, queue_name)
  WHERE name = exchange;
END;
$$ LANGUAGE plpgsql;
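
-- Illustrative usage (not part of the packaged file): a minimal sketch of
-- managing exchange bindings directly, assuming a hypothetical exchange
-- 'user_events' and queue 'notifications'. In practice bindings are usually
-- passed to pgmb.assert_queue() below, which calls these helpers itself.
SELECT pgmb.assert_exchange('user_events');               -- idempotent create
SELECT pgmb.bind_queue('notifications', 'user_events');
SELECT pgmb.unbind_queue('notifications', 'user_events');
SELECT pgmb.delete_exchange('user_events');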

-- table for queue metadata
CREATE TABLE pgmb.queues (
  name VARCHAR(64) PRIMARY KEY,
  schema_name VARCHAR(64) NOT NULL,
  created_at TIMESTAMPTZ DEFAULT NOW(),
  ack_setting pgmb.queue_ack_setting DEFAULT 'delete',
  queue_type pgmb.queue_type DEFAULT 'logged',
  default_headers JSONB DEFAULT '{}'::JSONB
);

-- utility function to create a queue table
CREATE OR REPLACE FUNCTION pgmb.create_queue_table(
  queue_name VARCHAR(64),
  schema_name VARCHAR(64),
  queue_type pgmb.queue_type
) RETURNS VOID AS $$
BEGIN
  -- create the live_messages table
  EXECUTE 'CREATE TABLE ' || quote_ident(schema_name) || '.live_messages (
    id VARCHAR(22) PRIMARY KEY,
    message BYTEA NOT NULL,
    headers JSONB NOT NULL DEFAULT ''{}''::JSONB,
    created_at TIMESTAMPTZ DEFAULT NOW()
  )';
  IF queue_type = 'unlogged' THEN
    EXECUTE 'ALTER TABLE ' || quote_ident(schema_name)
      || '.live_messages SET UNLOGGED';
  END IF;
END;
$$ LANGUAGE plpgsql;

-- fn to ensure a queue exists. If bindings are provided, all existing
-- bindings are removed and the queue is bound to the new exchanges.
-- @returns true if the queue was created, false if it already exists
CREATE OR REPLACE FUNCTION pgmb.assert_queue(
  queue_name VARCHAR(64),
  ack_setting pgmb.queue_ack_setting DEFAULT NULL,
  default_headers JSONB DEFAULT NULL,
  queue_type pgmb.queue_type DEFAULT NULL,
  bindings VARCHAR(64)[] DEFAULT NULL
)
RETURNS BOOLEAN AS $$
DECLARE
  -- schema that holds this queue's tables
  schema_name VARCHAR(64);
  _ack_setting pgmb.queue_ack_setting;
  _queue_type pgmb.queue_type;
BEGIN
  schema_name := 'pgmb_q_' || queue_name;

  -- if bindings are provided, assert the exchanges,
  -- and bind the queue to them
  IF bindings IS NOT NULL THEN
    -- remove all existing bindings
    UPDATE pgmb.exchanges
    SET queues = array_remove(queues, queue_name)
    WHERE queue_name = ANY(queues);
    -- create the exchanges
    PERFORM pgmb.assert_exchange(binding)
    FROM unnest(bindings) AS binding;
    -- bind the queue to the exchanges
    PERFORM pgmb.bind_queue(queue_name, binding)
    FROM unnest(bindings) AS binding;
  END IF;

  -- check if the queue already exists
  IF EXISTS (SELECT 1 FROM pgmb.queues WHERE name = queue_name) THEN
    -- queue already exists
    RETURN FALSE;
  END IF;
  -- store in the queues table
  EXECUTE 'INSERT INTO pgmb.queues
    (name, schema_name, ack_setting, default_headers, queue_type)
    VALUES (
      $1,
      $2,'
    || (CASE WHEN ack_setting IS NULL THEN 'DEFAULT' ELSE '$3' END) || ','
    || (CASE WHEN default_headers IS NULL THEN 'DEFAULT' ELSE '$4' END) || ','
    || (CASE WHEN queue_type IS NULL THEN 'DEFAULT' ELSE '$5' END)
    || ')' USING queue_name, schema_name,
      ack_setting, default_headers, queue_type;
  -- create schema
  EXECUTE 'CREATE SCHEMA ' || quote_ident(schema_name);

  -- get the saved settings
  SELECT q.ack_setting, q.queue_type FROM pgmb.queues q
  WHERE q.name = queue_name INTO _ack_setting, _queue_type;

  -- create the live_messages table
  PERFORM pgmb.create_queue_table(queue_name, schema_name, _queue_type);
  -- create the consumed_messages table (if ack_setting is archive)
  IF _ack_setting = 'archive' THEN
    EXECUTE 'CREATE TABLE ' || quote_ident(schema_name) || '.consumed_messages (
      id VARCHAR(22) PRIMARY KEY,
      message BYTEA NOT NULL,
      headers JSONB NOT NULL DEFAULT ''{}''::JSONB,
      success BOOLEAN NOT NULL,
      consumed_at TIMESTAMPTZ DEFAULT NOW()
    )';
    IF _queue_type = 'unlogged' THEN
      EXECUTE 'ALTER TABLE ' || quote_ident(schema_name)
        || '.consumed_messages SET UNLOGGED';
    END IF;
  END IF;
  RETURN TRUE;
END;
$$ LANGUAGE plpgsql;
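
-- Illustrative usage (not part of the packaged file): a sketch of creating a
-- hypothetical 'notifications' queue that archives acked messages and is bound
-- to the 'user_events' exchange assumed above. Returns true on first creation,
-- false if the queue already existed.
SELECT pgmb.assert_queue(
  'notifications',
  ack_setting := 'archive',
  queue_type := 'unlogged',
  bindings := ARRAY['user_events']::VARCHAR(64)[]
);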

-- fn to delete a queue
CREATE OR REPLACE FUNCTION pgmb.delete_queue(queue_name VARCHAR(64))
RETURNS VOID AS $$
DECLARE
  -- schema that holds this queue's tables
  schema_name VARCHAR(64);
BEGIN
  -- get schema name
  SELECT q.schema_name FROM pgmb.queues q
  WHERE q.name = queue_name INTO schema_name;
  -- drop the schema
  EXECUTE 'DROP SCHEMA IF EXISTS ' || quote_ident(schema_name) || ' CASCADE';
  -- remove from exchanges
  UPDATE pgmb.exchanges
  SET queues = array_remove(queues, queue_name)
  WHERE queue_name = ANY(queues);
  -- remove from queues
  DELETE FROM pgmb.queues WHERE name = queue_name;
END;
$$ LANGUAGE plpgsql;

-- fn to purge a queue. Will drop the table and recreate it.
-- This will delete all messages in the queue.
CREATE OR REPLACE FUNCTION pgmb.purge_queue(queue_name VARCHAR(64))
RETURNS VOID AS $$
DECLARE
  schema_name VARCHAR(64);
  queue_type pgmb.queue_type;
BEGIN
  -- get schema name
  SELECT q.schema_name, q.queue_type FROM pgmb.queues q
  WHERE q.name = queue_name INTO schema_name, queue_type;
  -- drop the live_messages table
  EXECUTE 'DROP TABLE IF EXISTS '
    || quote_ident(schema_name) || '.live_messages';
  -- create the live_messages table
  PERFORM pgmb.create_queue_table(queue_name, schema_name, queue_type);
END;
$$ LANGUAGE plpgsql;
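
-- Illustrative usage (not part of the packaged file): purging drops and
-- recreates live_messages for the hypothetical 'notifications' queue;
-- deleting removes the queue's schema, bindings and metadata entirely.
SELECT pgmb.purge_queue('notifications');
SELECT pgmb.delete_queue('notifications');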

-- fn to create a random bigint. Used for message IDs
CREATE OR REPLACE FUNCTION pgmb.create_random_bigint()
RETURNS BIGINT AS $$
BEGIN
  -- the message ID allows for 7 hex digits of randomness,
  -- i.e. 28 bits of randomness. Thus, the max we allow is
  -- 0xffffff8, which allows batch inserts to increment the
  -- randomness for successive messages in the batch.
  RETURN (random() * 0xffffff8)::BIGINT;
END
$$ LANGUAGE plpgsql VOLATILE PARALLEL SAFE;

-- fn to create a unique message ID. This'll be the current timestamp
-- + a random number
CREATE OR REPLACE FUNCTION pgmb.create_message_id(
  dt timestamptz DEFAULT clock_timestamp(),
  rand bigint DEFAULT pgmb.create_random_bigint()
)
RETURNS VARCHAR(22) AS $$
BEGIN
  -- create a unique message ID: 'pm', then 13 hex chars of the
  -- timestamp in microseconds, then 7 hex chars of randomness.
  -- The outer substr ensures the string is always at most 22 chars.
  RETURN substr(
    'pm'
    || substr(lpad(to_hex((extract(epoch from dt) * 1000000)::bigint), 13, '0'), 1, 13)
    || lpad(to_hex(rand), 7, '0'),
    1,
    22
  );
END
$$ LANGUAGE plpgsql VOLATILE PARALLEL SAFE;

CREATE OR REPLACE FUNCTION pgmb.get_max_message_id(
  dt timestamptz DEFAULT clock_timestamp()
)
RETURNS VARCHAR(22) AS $$
BEGIN
  RETURN pgmb.create_message_id(
    dt,
    rand := 999999999999 -- max randomness
  );
END
$$ LANGUAGE plpgsql VOLATILE PARALLEL SAFE;

-- fn to extract the date from a message ID.
CREATE OR REPLACE FUNCTION pgmb.extract_date_from_message_id(
  message_id VARCHAR(22)
)
RETURNS TIMESTAMPTZ AS $$
BEGIN
  -- convert it to a timestamp
  RETURN to_timestamp(('0x' || substr(message_id, 3, 13))::numeric / 1000000);
END
$$ LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
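
-- Illustrative usage (not part of the packaged file): message IDs sort by
-- consume-time, so a deferred message simply gets an ID in the future and
-- stays above get_max_message_id() until it becomes consumable.
SELECT pgmb.create_message_id() AS id_now,
       pgmb.create_message_id(NOW() + interval '5 minutes') AS id_deferred,
       pgmb.extract_date_from_message_id(pgmb.create_message_id()) AS roundtripped_ts;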

-- fn to send multiple messages into a queue
CREATE OR REPLACE FUNCTION pgmb.send(
  queue_name VARCHAR(64),
  messages pgmb.enqueue_msg[]
)
RETURNS SETOF VARCHAR(22) AS $$
DECLARE
  -- we'll have a starting random number, and each successive message ID's
  -- random component will be this number + the ordinality of the message.
  start_rand constant BIGINT = pgmb.create_random_bigint();
BEGIN
  -- create the ID for each message, and then send to the internal _send fn
  RETURN QUERY
  WITH msg_records AS (
    SELECT (
      pgmb.create_message_id(
        COALESCE(m.consume_at, clock_timestamp()),
        start_rand + m.ordinality
      ),
      m.message,
      m.headers
    )::pgmb.msg_record AS record
    FROM unnest(messages) WITH ORDINALITY AS m
  )
  SELECT pgmb._send(queue_name, ARRAY_AGG(m.record)::pgmb.msg_record[])
  FROM msg_records m;
END
$$ LANGUAGE plpgsql;
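
-- Illustrative usage (not part of the packaged file): a sketch of enqueueing
-- two messages on the hypothetical 'notifications' queue, the second one
-- deferred by a minute via consume_at. Returns the generated message IDs.
SELECT * FROM pgmb.send(
  'notifications',
  ARRAY[
    ('hello'::bytea, '{"type":"greeting"}'::jsonb, NULL)::pgmb.enqueue_msg,
    ('later'::bytea, NULL, NOW() + interval '1 minute')::pgmb.enqueue_msg
  ]
);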

-- internal fn to send multiple messages with an existing ID into a queue
CREATE OR REPLACE FUNCTION pgmb._send(
  queue_name VARCHAR(64),
  messages pgmb.msg_record[]
)
RETURNS SETOF VARCHAR(22) AS $$
DECLARE
  -- the queue's schema and default headers
  schema_name VARCHAR(64);
  default_headers JSONB;
BEGIN
  -- each queue would have its own channel to listen on, so a consumer can
  -- listen to a specific queue. This'll be used to notify the consumer when
  -- new messages are added to the queue.
  PERFORM pg_notify(
    'chn_' || queue_name,
    ('{"count":' || array_length(messages, 1)::varchar || '}')::varchar
  );

  -- get schema name and default headers
  SELECT q.schema_name, q.default_headers FROM pgmb.queues q
  WHERE q.name = queue_name INTO schema_name, default_headers;
  -- Insert the messages into the queue and return all message IDs. We use the
  -- ordinality of the array to ensure that each message is inserted in the same
  -- order as it was sent. This is important for the consumer to process the
  -- messages in the same order as they were sent.
  RETURN QUERY
  EXECUTE 'INSERT INTO '
    || quote_ident(schema_name)
    || '.live_messages (id, message, headers)
    SELECT
      id,
      message,
      COALESCE($1, ''{}''::JSONB) || COALESCE(headers, ''{}''::JSONB)
    FROM unnest($2)
    RETURNING id' USING default_headers, messages;
END
$$ LANGUAGE plpgsql;

-- fn to positively/negatively ack 1 or more messages.
-- If "success": will send to consumed_messages or delete,
-- based on the queue's ack_setting
-- If "failure": will ack the message, and requeue it if retries are left
CREATE OR REPLACE FUNCTION pgmb.ack_msgs(
  queue_name VARCHAR(64),
  success BOOLEAN,
  ids VARCHAR(22)[]
)
RETURNS VOID AS $$
DECLARE
  schema_name VARCHAR(64);
  ack_setting pgmb.queue_ack_setting;
  query_str TEXT;
  deleted_msg_count int;
BEGIN
  -- get schema name and ack setting
  SELECT q.schema_name, q.ack_setting
  FROM pgmb.queues q
  WHERE q.name = queue_name
  INTO schema_name, ack_setting;

  -- we'll construct a single CTE query that'll delete messages,
  -- requeue them if needed, and archive them if ack_setting is 'archive'.
  query_str := 'WITH deleted_msgs AS (
    DELETE FROM ' || quote_ident(schema_name) || '.live_messages
    WHERE id = ANY($1)
    RETURNING id, message, headers
  )';

  -- re-insert messages that can be retried
  IF NOT success THEN
    query_str := query_str || ',
    requeued AS (
      INSERT INTO '
      || quote_ident(schema_name)
      || '.live_messages (id, message, headers)
      SELECT
        pgmb.create_message_id(
          clock_timestamp() + (interval ''1 second'') * (t.headers->''retriesLeftS''->0)::int,
          rn
        ),
        t.message,
        t.headers
        -- set retriesLeftS to the next retry
        || jsonb_build_object(''retriesLeftS'', (t.headers->''retriesLeftS'') #- ''{0}'')
        -- set the originalMessageId
        -- to the original message ID if it exists
        || jsonb_build_object(
          ''originalMessageId'', COALESCE(t.headers->''originalMessageId'', to_jsonb(t.id))
        )
        -- set the tries
        || jsonb_build_object(
          ''tries'',
          CASE
            WHEN jsonb_typeof(t.headers->''tries'') = ''number'' THEN
              to_jsonb((t.headers->>''tries'')::INTEGER + 1)
            ELSE
              to_jsonb(1)
          END
        )
      FROM (select *, row_number() over () AS rn FROM deleted_msgs) t
      WHERE jsonb_typeof(t.headers -> ''retriesLeftS'' -> 0) = ''number''
      RETURNING id
    ),
    requeued_notify AS (
      SELECT pg_notify(
        ''chn_' || queue_name || ''',
        ''{"count":'' || (select count(*) from requeued)::varchar || ''}''
      )
    )
    ';
  END IF;

  IF ack_setting = 'archive' THEN
    -- Delete the messages from live_messages and insert them into
    -- consumed_messages in one operation,
    -- if the queue's ack_setting is set to 'archive'
    query_str := query_str || ',
    archived_records AS (
      INSERT INTO ' || quote_ident(schema_name) || '.consumed_messages
      (id, message, headers, success)
      SELECT t.id, t.message, t.headers, $2::boolean
      FROM deleted_msgs t
    )';
  END IF;

  query_str := query_str || '
    SELECT COUNT(*) FROM deleted_msgs';

  EXECUTE query_str USING ids, success INTO deleted_msg_count;

  -- raise an exception if fewer messages were removed than expected
  IF deleted_msg_count != array_length(ids, 1) THEN
    RAISE EXCEPTION 'Only removed % out of % expected message(s).',
      deleted_msg_count, array_length(ids, 1);
  END IF;
END
$$ LANGUAGE plpgsql;
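
-- Illustrative usage (not part of the packaged file): a message enqueued with a
-- "retriesLeftS" header is retried on negative ack; each retry consumes the
-- first entry of the array as its delay in seconds and increments "tries".
SELECT * FROM pgmb.send(
  'notifications',
  ARRAY[('payload'::bytea, '{"retriesLeftS": [5, 30, 300]}'::jsonb, NULL)::pgmb.enqueue_msg]
);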

-- fn to read the next available messages from the queue.
-- The messages read will remain invisible to other consumers
-- until the transaction is either committed or rolled back.
CREATE OR REPLACE FUNCTION pgmb.read_from_queue(
  queue_name VARCHAR(64),
  limit_count INTEGER DEFAULT 1
)
RETURNS SETOF pgmb.msg_record AS $$
DECLARE
  schema_name VARCHAR(64);
BEGIN
  -- get schema name
  SELECT q.schema_name FROM pgmb.queues q
  WHERE q.name = queue_name INTO schema_name;
  -- read the messages from the queue
  RETURN QUERY EXECUTE 'SELECT id, message, headers
    FROM ' || quote_ident(schema_name) || '.live_messages
    WHERE id <= pgmb.get_max_message_id()
    ORDER BY id ASC
    FOR UPDATE SKIP LOCKED
    LIMIT $1'
  USING limit_count;
END
$$ LANGUAGE plpgsql;
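
-- Illustrative usage (not part of the packaged file): a minimal consumer
-- sketch for the hypothetical 'notifications' queue. The read locks the rows
-- (FOR UPDATE SKIP LOCKED), so reading and acking happen in one transaction.
DO $$
DECLARE
  msgs pgmb.msg_record[];
BEGIN
  -- read a batch; the rows stay locked until this block's transaction ends
  msgs := ARRAY(SELECT r FROM pgmb.read_from_queue('notifications', 10) r);
  -- ... process each message here (e.g. convert_from(message, 'utf8')) ...
  -- then positively ack everything that was read
  PERFORM pgmb.ack_msgs(
    'notifications', true,
    ARRAY(SELECT m.id FROM unnest(msgs) m)
  );
END $$;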

-- fn to publish a message to 1 or more exchanges.
-- Will find all queues subscribed to it and insert the message into
-- each of them.
-- Each queue will receive a copy of the message, with the exchange name
-- added to the headers. The ID of the message will remain the same
-- across all queues.
-- @returns the ID of each published message, in input order; NULL is
-- returned at the index of any message that was not sent to any queue.
CREATE OR REPLACE FUNCTION pgmb.publish(
  messages pgmb.publish_msg[]
)
RETURNS SETOF VARCHAR(22) AS $$
DECLARE
  start_rand constant BIGINT = pgmb.create_random_bigint();
BEGIN
  -- Create message IDs for each message, then we'll send them to the individual
  -- queues. The ID will be the same for all queues, but the headers may vary
  -- across queues.
  RETURN QUERY
  WITH msg_records AS (
    SELECT
      pgmb.create_message_id(
        COALESCE(consume_at, clock_timestamp()),
        start_rand + ordinality
      ) AS id,
      message,
      JSONB_SET(
        COALESCE(headers, '{}'::JSONB),
        '{exchange}',
        TO_JSONB(exchange)
      ) as headers,
      exchange,
      ordinality
    FROM unnest(messages) WITH ORDINALITY
  ),
  sends AS (
    SELECT
      pgmb._send(
        q.queue_name,
        ARRAY_AGG((m.id, m.message, m.headers)::pgmb.msg_record)
      ) as id
    FROM msg_records m,
    LATERAL (
      SELECT DISTINCT name, unnest(queues) AS queue_name
      FROM pgmb.exchanges e
      WHERE e.name = m.exchange
    ) q
    GROUP BY q.queue_name
  )
  -- we'll select an aggregate of "sends", to ensure that each "send" call
  -- is executed. If this is not done, PG may optimize the query
  -- and not execute the "sends" CTE at all, resulting in no messages being sent.
  -- So, this aggregate call ensures PG does not optimize it away.
  SELECT
    CASE WHEN count(*) FILTER (WHERE sends.id IS NOT NULL) > 0 THEN m.id END
  FROM msg_records m
  LEFT JOIN sends ON sends.id = m.id
  GROUP BY m.id, m.ordinality
  ORDER BY m.ordinality;
END
$$ LANGUAGE plpgsql;
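
-- Illustrative usage (not part of the packaged file): publishing one message to
-- the hypothetical 'user_events' exchange. Every queue bound to the exchange
-- receives a copy, with {"exchange": "user_events"} merged into its headers.
SELECT * FROM pgmb.publish(
  ARRAY[
    ('user_events', '{"userId":"u1"}'::bytea, '{"event":"signup"}'::jsonb, NULL)::pgmb.publish_msg
  ]
);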

-- get the metrics of a queue.
-- Pass "approximate" as true to get an approximate count of the messages, which
-- is much faster than counting all rows. "consumable_length" will always be 0
-- when "approximate" is passed as true.
CREATE OR REPLACE FUNCTION pgmb.get_queue_metrics(
  queue_name VARCHAR(64),
  approximate BOOLEAN DEFAULT FALSE
)
RETURNS SETOF pgmb.metrics_result AS $$
DECLARE
  schema_name VARCHAR(64);
BEGIN
  -- get schema name
  SELECT q.schema_name FROM pgmb.queues q
  WHERE q.name = queue_name INTO schema_name;
  -- get the metrics of the queue
  RETURN QUERY EXECUTE 'SELECT
    ''' || queue_name || '''::varchar(64) AS queue_name,
    ' ||
    (CASE WHEN approximate THEN
      'COALESCE(pgmb.get_approximate_count(' || quote_literal(schema_name || '.live_messages') || '), 0) AS total_length,'
      || '0 AS consumable_length,'
    ELSE
      'count(*)::int AS total_length,'
      || '(count(*) FILTER (WHERE id <= pgmb.get_max_message_id()))::int AS consumable_length,'
    END) || '
    (clock_timestamp() - pgmb.extract_date_from_message_id(max(id))) AS newest_msg_age_sec,
    (clock_timestamp() - pgmb.extract_date_from_message_id(min(id))) AS oldest_msg_age_sec
    FROM ' || quote_ident(schema_name) || '.live_messages';
END
$$ LANGUAGE plpgsql;

-- fn to get metrics for all queues
CREATE OR REPLACE FUNCTION pgmb.get_all_queue_metrics(
  approximate BOOLEAN DEFAULT FALSE
)
RETURNS SETOF pgmb.metrics_result AS $$
BEGIN
  RETURN QUERY
  SELECT m.*
  FROM pgmb.queues q, pgmb.get_queue_metrics(q.name, approximate) m
  ORDER BY q.name ASC;
END
$$ LANGUAGE plpgsql;
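
-- Illustrative usage (not part of the packaged file): exact metrics for one
-- queue, and fast approximate metrics across all queues (consumable_length is
-- always 0 in approximate mode).
SELECT * FROM pgmb.get_queue_metrics('notifications');
SELECT * FROM pgmb.get_all_queue_metrics(approximate := true);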

-- fn to get an approximate count of rows in a table
-- See: https://stackoverflow.com/a/7945274
CREATE OR REPLACE FUNCTION pgmb.get_approximate_count(
  table_name regclass
)
RETURNS INTEGER AS $$
  SELECT (
    CASE WHEN c.reltuples < 0 THEN NULL -- never vacuumed
      WHEN c.relpages = 0 THEN float8 '0' -- empty table
      ELSE c.reltuples / c.relpages END
    * (pg_catalog.pg_relation_size(c.oid)
      / pg_catalog.current_setting('block_size')::int)
  )::bigint
  FROM pg_catalog.pg_class c
  WHERE c.oid = table_name
  LIMIT 1;
$$ LANGUAGE sql;
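
-- Illustrative usage (not part of the packaged file): each queue's tables live
-- in a 'pgmb_q_<queue name>' schema, so the live backlog of the hypothetical
-- 'notifications' queue can be estimated directly from planner statistics.
SELECT pgmb.get_approximate_count('pgmb_q_notifications.live_messages');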

-- uninstall pgmb
CREATE OR REPLACE FUNCTION pgmb.uninstall()
RETURNS VOID AS $$
DECLARE
  schema_name VARCHAR(64);
BEGIN
  -- find all queues and drop their schemas
  FOR schema_name IN (SELECT q.schema_name FROM pgmb.queues q) LOOP
    -- drop the schemas of queues
    EXECUTE 'DROP SCHEMA ' || quote_ident(schema_name) || ' CASCADE';
  END LOOP;
  -- drop the schema
  DROP SCHEMA pgmb CASCADE;
END
$$ LANGUAGE plpgsql;