pg_versions 1.0
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +7 -0
- data/MIT-LICENSE +20 -0
- data/create-table.sql +1 -0
- data/db/migrate/1_create_pg_versions_table.rb +11 -0
- data/drop-table.sql +1 -0
- data/lib/pg_versions/pg_versions.rb +321 -0
- data/lib/pg_versions/rails.rb +7 -0
- data/lib/pg_versions/version.rb +3 -0
- data/lib/pg_versions.rb +5 -0
- metadata +107 -0
checksums.yaml
ADDED
@@ -0,0 +1,7 @@
|
|
1
|
+
---
|
2
|
+
SHA256:
|
3
|
+
metadata.gz: 272ebc261a3bbb1c96eb4545b2a215a55366b950ead6dd4b4547dbcf1ee6ea6a
|
4
|
+
data.tar.gz: 5ba78eabb8f3fca3b7d1302b3e032660c625c294ae6cef31eed252e801558acb
|
5
|
+
SHA512:
|
6
|
+
metadata.gz: 2fb87931f3595b7938328461397901244ae2d357f80b93e5698bc813ba25e4ee542228afd68e17e2e3fc96e523ac0f86a22a6fc24e1bad81eb5557020b330716
|
7
|
+
data.tar.gz: 7ecac0582c70f42295e870c4dcb8df35f23b94e9f5e5461717f50e4a8d8b561cc60513bf67eb02f9918f2b5887b2d70b4f9a59226cdf6a99cb08ebb9831d5234
|
data/MIT-LICENSE
ADDED
@@ -0,0 +1,20 @@
|
|
1
|
+
Copyright 2020-2021 yunta
|
2
|
+
|
3
|
+
Permission is hereby granted, free of charge, to any person obtaining
|
4
|
+
a copy of this software and associated documentation files (the
|
5
|
+
"Software"), to deal in the Software without restriction, including
|
6
|
+
without limitation the rights to use, copy, modify, merge, publish,
|
7
|
+
distribute, sublicense, and/or sell copies of the Software, and to
|
8
|
+
permit persons to whom the Software is furnished to do so, subject to
|
9
|
+
the following conditions:
|
10
|
+
|
11
|
+
The above copyright notice and this permission notice shall be
|
12
|
+
included in all copies or substantial portions of the Software.
|
13
|
+
|
14
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
15
|
+
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
16
|
+
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
17
|
+
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
18
|
+
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
19
|
+
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
20
|
+
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
data/create-table.sql
ADDED
@@ -0,0 +1 @@
|
|
1
|
+
CREATE UNLOGGED TABLE pg_versions (channel text NOT NULL PRIMARY KEY, instant timestamptz NOT NULL, counter smallint NOT NULL);
|
data/drop-table.sql
ADDED
@@ -0,0 +1 @@
|
|
1
|
+
DROP TABLE IF EXISTS pg_versions;
|
@@ -0,0 +1,321 @@
|
|
1
|
+
# Operations on versions:
|
2
|
+
# 1. bump - increase or assert version. ensures new version is unique and higher than any previous version, even if table entry existed before and got removed
|
3
|
+
# 2. read - always returns a version, even if table entry is missing
|
4
|
+
# 3. remove (clean) - for periodic pruning of old entries
|
5
|
+
#
|
6
|
+
# A "version" (and database schema) contains both: timestamp and a counter (smallint).
|
7
|
+
#
|
8
|
+
# We need the timestamp because entries may vanish from the table (either the table is unlogged and a crash occurred, or entries were removed for periodic cleanup).
|
9
|
+
# If an entry has vanished, and bump or read wants to generate a new version, they have to ensure that the new version is higher than any version the key may have had before.
|
10
|
+
# Using current timestamp of microsecond precision gives near-certainty of having unique first component of the version.
|
11
|
+
# There remains a theoretical risk that a timestamp was placed in the table, then got deleted (periodic cleanup), and then bump or read generated identical timestamp.
|
12
|
+
# All those actions would have to happen in the same microsecond, which, having accurate time source, is likely impossible with postgres (yet!).
|
13
|
+
# To eliminate the risk, delete operation on the table should be followed by at least one microsecond of sleep (or longer if time source is of lower precision), while holding an exclusive lock on the table.
|
14
|
+
# Bumps and reads always acquire shared lock on the table before reading the timestamp, to ensure that no delete operation gets executed between timestamp generation and version bump.
|
15
|
+
# Also, if using unlogged table, administrator should ensure that new database instance will not restart, recover, and resume servicing requests faster than one microsecond after the crash XD
|
16
|
+
#
|
17
|
+
# Besides the timestamp, we also need the counter. Counter helps solving two scenarios:
|
18
|
+
# 1) Hypothetical: Two transactions read out the timestamp at the same microsecond. Without the counter the versions asserted by both of those transactions would be identical.
|
19
|
+
# 2) Practical: Timestamp read-out and row-update are separate operations. It's possible that entire other bump fits between them, leading to assertion of lower version by the transaction which started first and ended last.
|
20
|
+
# With the counter, bump operation can detect that its timestamp is older than the one already present in the row, and bump the counter instead, leaving newer timestamp in place.
|
21
|
+
# TODO: think if early row lock wouldn't be better
|
22
|
+
#
|
23
|
+
# Read of non-existent row should trigger a bump and reread. Otherwise following scenario may happen:
|
24
|
+
# Empty table. Bump starts, gets timestamp, loses cpu. Reader reads, gets nothing, reports (current) timestamp higher than the one acquired by bump. Bump commits. Subsequent read reports older timestamp than previous read (loss of monotonicity).
|
25
|
+
#
|
26
|
+
|
27
|
+
|
28
|
+
require 'set'
|
29
|
+
|
30
|
+
#TODO: prepared statements?
|
31
|
+
|
32
|
+
module PgVersions
|
33
|
+
|
34
|
+
|
35
|
+
# Raised when no usable PostgreSQL connection is available: none was
# passed in and ActiveRecord is not loaded.
class ConnectionAcquisitionFailedError < StandardError
end
|
36
|
+
|
37
|
+
# Builds a SQL fragment rendering the timestamp expression +input+ as three
# comma-separated integers: date (YYYYMMDD), time of day (HH24MISS), and
# microseconds (US). +input+ is a SQL expression string, not a Ruby value.
def self.timestamp_to_integers(input)
  %w[YYYYMMDD HH24MISS US]
    .map { |format| "to_char(#{input}, '#{format}')::integer" }
    .join(" || ',' || ")
end
|
40
|
+
|
41
|
+
|
42
|
+
# Yields a raw PG connection and returns the block's value.
# Uses +pg_connection+ when given; otherwise borrows a connection from
# ActiveRecord's pool (unwrapping the underlying PG object).
# Raises ConnectionAcquisitionFailedError when neither is available.
def self.with_connection(pg_connection)
  return yield(pg_connection) if pg_connection

  unless defined? ActiveRecord
    raise ConnectionAcquisitionFailedError, "Missing connection. Either pass pg connection object or import ActiveRecord."
  end

  ActiveRecord::Base.connection_pool.with_connection do |ar_connection|
    # NOTE(review): reaches into AR internals for the raw PG::Connection;
    # tied to the adapter's private layout.
    yield ar_connection.instance_variable_get(:@connection)
  end
end
|
53
|
+
|
54
|
+
|
55
|
+
# Parses a version payload such as "20220116,120000,123456,0" into an
# array of integers. Integer() raises on malformed components instead of
# silently coercing to zero.
def self.string_to_version(version_str)
  parts = version_str.split(",")
  parts.map { |part| Integer(part) }
end
|
58
|
+
|
59
|
+
|
60
|
+
# Creates the pg_versions table by executing the bundled create-table.sql
# (an UNLOGGED table keyed by channel). +connection+ is an optional PG
# connection; otherwise one is borrowed via with_connection.
def self.create_table(connection=nil)
  PgVersions.with_connection(connection) { |pg_connection|
    # File.read instead of Kernel#open: avoids open()'s pipe-exec behavior
    # on "|"-prefixed paths and the deprecated bare open form.
    pg_connection.exec File.read(File.expand_path("../../create-table.sql", __dir__))
  }
end
|
67
|
+
|
68
|
+
# Drops the pg_versions table (IF EXISTS) via the bundled drop-table.sql.
# +connection+ is an optional PG connection; otherwise one is borrowed
# via with_connection.
def self.drop_table(connection=nil)
  PgVersions.with_connection(connection) { |pg_connection|
    # File.read instead of Kernel#open: avoids open()'s pipe-exec behavior
    # on "|"-prefixed paths and the deprecated bare open form.
    pg_connection.exec File.read(File.expand_path("../../drop-table.sql", __dir__))
  }
end
|
75
|
+
|
76
|
+
#TODO: ensure this is called only once per transaction, or that all bumps occur in the same order in all transactions, to avoid deadlocks
# Bumps (or asserts) the version of every listed channel in one statement
# and pg_notify()s each channel with its new version. Channels are sorted
# so concurrent bumps lock rows in a stable order.
# Returns {channel => [date, time, usec, counter]}.
def self.bump(*channels, connection: nil)
  channels = [channels].flatten.sort
  return {} if channels.empty?  # don't borrow a connection for a no-op
  PgVersions.with_connection(connection) { |pg_connection|
    # (index, escaped-channel) VALUES rows; the index maps result rows back
    # to the Ruby channel list without relying on string round-trips.
    quoted_channels = channels.map.with_index { |channel, i| "(#{i},'#{pg_connection.escape_string(channel)}')" }.join(", ")
    # table-wide share lock is there to mutually exclude table cleaner
    # clock_timestamp() - this has to be a timestamp after table lock got acquired
    pg_connection.exec("
      LOCK TABLE pg_versions IN ACCESS SHARE MODE;
      WITH
        to_bump(i, channel) AS (VALUES #{quoted_channels})
        , current_instant(ts) AS (VALUES (clock_timestamp()))
        , updated AS (
          INSERT INTO pg_versions(channel, instant, counter)
            SELECT to_bump.channel, (SELECT ts FROM current_instant), 0 FROM to_bump
          ON CONFLICT (channel) DO UPDATE SET
            instant = GREATEST(pg_versions.instant, EXCLUDED.instant),
            counter = CASE WHEN pg_versions.instant < EXCLUDED.instant THEN 0 ELSE pg_versions.counter + 1 END
          RETURNING channel, instant, pg_versions.counter
        )
      SELECT DISTINCT
        i
        , #{timestamp_to_integers('updated.instant')} || ',' || updated.counter::text AS version
        , pg_notify(updated.channel::text, #{timestamp_to_integers('updated.instant')} || ',' || updated.counter::text)::text
      FROM
        to_bump
        JOIN updated ON to_bump.channel = updated.channel;
    ").map { |row| [channels[Integer(row["i"])], string_to_version(row["version"])] }.to_h
  }
end
|
107
|
+
|
108
|
+
|
109
|
+
# Reads the current version of every listed channel. Channels with no
# table row are bumped (creating a row), so a version is always returned
# for each requested channel — see the monotonicity note in the header.
# Returns {channel => [date, time, usec, counter]}.
def self.read(*channels, connection: nil)
  channels = [channels].flatten.sort
  return {} if channels.empty?  # don't borrow a connection for a no-op
  PgVersions.with_connection(connection) { |pg_connection|
    versions = {}
    quoted_channels = channels.map.with_index { |channel, i| "(#{i},'#{pg_connection.escape_string(channel)}')" }.join(", ")
    # Share lock mutually excludes the table cleaner (see header comment).
    pg_connection.exec("
      LOCK TABLE pg_versions IN ACCESS SHARE MODE;
      WITH
        channels(i, channel) AS (VALUES #{quoted_channels})
      SELECT
        i
        , #{timestamp_to_integers('instant')} || ',' || counter AS version
      FROM
        channels
        JOIN pg_versions ON pg_versions.channel = channels.channel
      ORDER BY
        i DESC;
    ").each { |row|
      # DESC order means delete_at prunes from the tail first, so the
      # remaining (smaller) indices stay valid while found channels are
      # removed from +channels+.
      versions[channels.delete_at(Integer(row["i"]))] = string_to_version(row["version"])
    }
    #TODO: bump in the same query instead of calling bump
    # Whatever survived the pruning had no row; bump creates one.
    versions.merge!(self.bump(channels, connection: pg_connection)) if channels.size > 0
    versions
  }
end
|
135
|
+
|
136
|
+
|
137
|
+
# Value object pairing a channel name with the version it was notified at.
class Notification
  attr_reader :channel, :version

  def initialize(channel, version)
    @channel = channel
    @version = version
  end
end
|
143
|
+
|
144
|
+
|
145
|
+
class Connection
|
146
|
+
|
147
|
+
# Starts the background actor thread that owns the PG connection.
# The actor multiplexes two event sources with IO.select: the PG socket
# (LISTEN/NOTIFY traffic) and a self-pipe used by actor_call to inject
# commands. Raises ConnectionAcquisitionFailedError if no connection
# could be obtained (the startup queue carries either false or the error).
def initialize(connection=nil)
  @actor_commands = Queue.new
  actor_notify_r, @actor_notify_w = IO.pipe

  startup_queue = Queue.new
  @actor = Thread.new {
    begin
      PgVersions.with_connection(connection) { |pg_connection|
        startup_queue << false  # signal successful startup to the constructor
        subscribers = Hash.new { |h, k| h[k] = Set.new }
        loop {
          #TODO: handle errors
          readable, _w, _e = IO.select([pg_connection.socket_io, actor_notify_r])

          pg_connection.consume_input if readable.include?(pg_connection.socket_io)

          if readable.include?(actor_notify_r)
            @actor_commands.shift.call(pg_connection, subscribers)
            actor_notify_r.read(1)  # consume the wake-up byte
          end

          # Drain every pending NOTIFY and fan it out to subscribers.
          while (note = pg_connection.notifies)
            channel, payload = note[:relname], note[:extra]
            subscribers[channel].each { |subscriber|
              subscriber.notify(channel, PgVersions.string_to_version(payload))
            }
          end
        }
      }
    rescue ConnectionAcquisitionFailedError => e
      startup_queue << e
    end
  }
  (connection_error = startup_queue.shift) and raise connection_error
end
|
184
|
+
|
185
|
+
|
186
|
+
# Executes +block+ on the actor thread (the sole owner of the PG
# connection) and returns its value. Blocks the caller until done:
# the command goes on the queue, a byte on the pipe wakes the actor's
# select(), and the result comes back over a private queue.
def actor_call(&block)
  result_queue = Queue.new
  @actor_commands << proc { |pg_connection, subscribers|
    result_queue << block.call(pg_connection, subscribers)
  }
  @actor_notify_w.write('!')
  result_queue.shift
end
|
194
|
+
|
195
|
+
|
196
|
+
# Bumps +channels+ using the actor's connection; see PgVersions.bump.
def bump(*channels)
  actor_call do |pg_connection, _subscribers|
    PgVersions.bump(channels, connection: pg_connection)
  end
end
|
201
|
+
|
202
|
+
|
203
|
+
# Reads +channels+ using the actor's connection; see PgVersions.read.
def read(*channels)
  actor_call do |pg_connection, _subscribers|
    PgVersions.read(channels, connection: pg_connection)
  end
end
|
208
|
+
|
209
|
+
|
210
|
+
# Creates a Subscription covering +channels+ and returns it.
# +known+ seeds versions the caller already has, so their initial
# notifications are suppressed by Subscription#wait.
def subscribe(*channels, known: {})
  Subscription.new(self).tap { |subscription|
    subscription.subscribe([channels].flatten, known: known)
  }
end
|
215
|
+
|
216
|
+
|
217
|
+
# One client's view of a set of LISTENed channels. Incoming version
# notifications are queued; #wait pops them, filtering out versions the
# client has already seen. Subscriptions are reference-counted per
# channel, so nested subscribe/unsubscribe pairs balance correctly.
class Subscription

  def initialize(connection)
    @connection = connection
    @notifications = Queue.new
    @already_known_versions = Hash.new { |h, k| h[k] = [] }
    @channels = Hash.new(0)  # channel => subscribe refcount
  end


  # Subscribes to +channels+ (LISTENing on first use per channel) and
  # seeds current versions so the subscriber learns the state at
  # subscribe time. +known+ pre-populates already-seen versions.
  def subscribe(channels, known: {})
    update_already_known_versions(known)
    fresh = [channels].flatten.select { |channel| (@channels[channel] += 1) == 1 }
    return if fresh.empty?
    @connection.actor_call { |pg_connection, subscribers|
      fresh.each { |channel|
        subscribers[channel] << self
        # first subscriber on this connection issues the LISTEN
        pg_connection.exec("LISTEN #{PG::Connection.quote_ident(channel)}") if subscribers[channel].size == 1
      }
      PgVersions.read(fresh, connection: pg_connection).each_pair { |channel, version|
        notify(channel, version)
      }
    }
  end


  # Drops one refcount per listed channel; UNLISTENs channels whose last
  # local subscriber just left. Raises on unbalanced unsubscribes.
  def unsubscribe(*channels)
    released = [channels].flatten.select { |channel|
      @channels[channel] -= 1
      raise "Trying to unsubscribe from channel (%p) more times than it was subscribed to"%[channel] if @channels[channel] < 0
      @channels.delete(channel) if @channels[channel] == 0
      not @channels.has_key?(channel)
    }
    @connection.actor_call { |pg_connection, subscribers|
      released.each { |channel|
        subscribers[channel].delete(self)
        if subscribers[channel].size == 0
          pg_connection.exec("UNLISTEN #{PG::Connection.quote_ident(channel)}")
          subscribers.delete(channel)
        end
      }
    }
  end


  # Reads current versions (all subscribed channels when none given).
  # With notify: false the results also count as "already seen", so
  # #wait will not re-deliver them.
  def read(*channels, notify: true)
    channels = @channels.keys if channels.size == 0
    versions = @connection.actor_call { |pg_connection, _subscribers|
      PgVersions.read(channels, connection: pg_connection)
    }
    update_already_known_versions(versions) if not notify
    versions
  end


  # Bumps versions (all subscribed channels when none given).
  # With notify: false the new versions count as "already seen".
  def bump(*channels, notify: true)
    channels = @channels.keys if channels.size == 0
    versions = @connection.actor_call { |pg_connection, _subscribers|
      PgVersions.bump(channels, connection: pg_connection)
    }
    update_already_known_versions(versions) if not notify
    versions
  end


  # Blocks until a channel reports a version newer than the last one
  # seen, returning a Notification; returns nil on termination (#drop).
  def wait(new_already_known_versions = {})
    update_already_known_versions(new_already_known_versions)
    loop {
      channel, version = @notifications.shift
      return nil if not channel #termination
      # Array#<=> is lexicographic, so versions compare element-wise.
      next unless (@already_known_versions[channel] <=> version) == -1
      @already_known_versions[channel] = version
      return Notification.new(channel, version)
    }
  end


  # Called by the actor thread to enqueue an incoming notification.
  def notify(channel, payload)
    @notifications << [channel, payload]
  end


  # Terminates the subscription: unblocks any waiter (nil marker) and
  # releases every channel refcount.
  def drop
    @notifications << [nil, nil]
    unsubscribe(@channels.keys)
  end


  # Raises the already-seen floor for each channel, keeping the maximum
  # of the stored and offered versions.
  def update_already_known_versions(new_already_known_versions)
    new_already_known_versions.each { |channel, version|
      @already_known_versions[channel] = version if (version <=> @already_known_versions[channel]) == 1
    }
  end

end
|
318
|
+
|
319
|
+
end
|
320
|
+
|
321
|
+
end
|
data/lib/pg_versions.rb
ADDED
metadata
ADDED
@@ -0,0 +1,107 @@
|
|
1
|
+
--- !ruby/object:Gem::Specification
|
2
|
+
name: pg_versions
|
3
|
+
version: !ruby/object:Gem::Version
|
4
|
+
version: '1.0'
|
5
|
+
platform: ruby
|
6
|
+
authors:
|
7
|
+
- yunta
|
8
|
+
autorequire:
|
9
|
+
bindir: bin
|
10
|
+
cert_chain: []
|
11
|
+
date: 2022-01-16 00:00:00.000000000 Z
|
12
|
+
dependencies:
|
13
|
+
- !ruby/object:Gem::Dependency
|
14
|
+
name: rspec
|
15
|
+
requirement: !ruby/object:Gem::Requirement
|
16
|
+
requirements:
|
17
|
+
- - "~>"
|
18
|
+
- !ruby/object:Gem::Version
|
19
|
+
version: '3.10'
|
20
|
+
type: :development
|
21
|
+
prerelease: false
|
22
|
+
version_requirements: !ruby/object:Gem::Requirement
|
23
|
+
requirements:
|
24
|
+
- - "~>"
|
25
|
+
- !ruby/object:Gem::Version
|
26
|
+
version: '3.10'
|
27
|
+
- !ruby/object:Gem::Dependency
|
28
|
+
name: simplecov
|
29
|
+
requirement: !ruby/object:Gem::Requirement
|
30
|
+
requirements:
|
31
|
+
- - "~>"
|
32
|
+
- !ruby/object:Gem::Version
|
33
|
+
version: '0.21'
|
34
|
+
type: :development
|
35
|
+
prerelease: false
|
36
|
+
version_requirements: !ruby/object:Gem::Requirement
|
37
|
+
requirements:
|
38
|
+
- - "~>"
|
39
|
+
- !ruby/object:Gem::Version
|
40
|
+
version: '0.21'
|
41
|
+
- !ruby/object:Gem::Dependency
|
42
|
+
name: activerecord
|
43
|
+
requirement: !ruby/object:Gem::Requirement
|
44
|
+
requirements:
|
45
|
+
- - "~>"
|
46
|
+
- !ruby/object:Gem::Version
|
47
|
+
version: '7.0'
|
48
|
+
type: :development
|
49
|
+
prerelease: false
|
50
|
+
version_requirements: !ruby/object:Gem::Requirement
|
51
|
+
requirements:
|
52
|
+
- - "~>"
|
53
|
+
- !ruby/object:Gem::Version
|
54
|
+
version: '7.0'
|
55
|
+
- !ruby/object:Gem::Dependency
|
56
|
+
name: pg
|
57
|
+
requirement: !ruby/object:Gem::Requirement
|
58
|
+
requirements:
|
59
|
+
- - "~>"
|
60
|
+
- !ruby/object:Gem::Version
|
61
|
+
version: '1.2'
|
62
|
+
type: :runtime
|
63
|
+
prerelease: false
|
64
|
+
version_requirements: !ruby/object:Gem::Requirement
|
65
|
+
requirements:
|
66
|
+
- - "~>"
|
67
|
+
- !ruby/object:Gem::Version
|
68
|
+
version: '1.2'
|
69
|
+
description: PostgreSQL notifications with persistence.
|
70
|
+
email:
|
71
|
+
- maciej.blomberg@mikoton.com
|
72
|
+
executables: []
|
73
|
+
extensions: []
|
74
|
+
extra_rdoc_files: []
|
75
|
+
files:
|
76
|
+
- MIT-LICENSE
|
77
|
+
- create-table.sql
|
78
|
+
- db/migrate/1_create_pg_versions_table.rb
|
79
|
+
- drop-table.sql
|
80
|
+
- lib/pg_versions.rb
|
81
|
+
- lib/pg_versions/pg_versions.rb
|
82
|
+
- lib/pg_versions/rails.rb
|
83
|
+
- lib/pg_versions/version.rb
|
84
|
+
homepage: https://gitlab.com/yunta/pg-versions
|
85
|
+
licenses:
|
86
|
+
- MIT
|
87
|
+
metadata: {}
|
88
|
+
post_install_message:
|
89
|
+
rdoc_options: []
|
90
|
+
require_paths:
|
91
|
+
- lib
|
92
|
+
required_ruby_version: !ruby/object:Gem::Requirement
|
93
|
+
requirements:
|
94
|
+
- - ">="
|
95
|
+
- !ruby/object:Gem::Version
|
96
|
+
version: '0'
|
97
|
+
required_rubygems_version: !ruby/object:Gem::Requirement
|
98
|
+
requirements:
|
99
|
+
- - ">="
|
100
|
+
- !ruby/object:Gem::Version
|
101
|
+
version: '0'
|
102
|
+
requirements: []
|
103
|
+
rubygems_version: 3.2.22
|
104
|
+
signing_key:
|
105
|
+
specification_version: 4
|
106
|
+
summary: Persistent timestamped postgres notification library
|
107
|
+
test_files: []
|