pgbus 0.7.9 → 0.8.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: d7661d7d684ac911e36b15267b4b6135081fa3bd9ca0a795cfceae7e9977304a
4
- data.tar.gz: 7cdb802918724dafa634925a99c48c9a0ee154356ec807e69ad6ec86cb48f9be
3
+ metadata.gz: 60cc7178f84e5d28919085f5c5a9d824aca958180d433daf13cb04a369ed25c0
4
+ data.tar.gz: 5a7fdb569f90cf3e60ef5951e86d6ed0c3c31c7703b0f88be91c2df3054b8817
5
5
  SHA512:
6
- metadata.gz: 917889d47343e7c8775f6be1c9bc9d72b7ca8a26de356a9434ba1470f0f4f345215e9f016f9bba74b78007c72668254b92715944574fc056b20ceb1af92ebaac
7
- data.tar.gz: 011533444eec2c09e29de3938d84e6fc06efd87c82621efcccf92008d88b3a3ac76464619c959617d022e076da4bed5ca03b50f3b5997e7d81ec9b52ccce90c3
6
+ metadata.gz: 8cc8aa7893bdb605379f4cea9542766062b5a759a89d6149e49a39729484f56790bc131b1063b16a855357a0ab4979aeb52f4bd9f572568e793482fcabaf9939
7
+ data.tar.gz: '052525538d45540220e66da0d43f007727848543d8f1184f7598f481e3abe3fe6be961eb6c634f414c158cd884b856305a35ded77e39d2f0f4d7c24bc10cf1a4'
@@ -131,7 +131,9 @@ module Pgbus
131
131
  return nil if cache[:script_emitted]
132
132
 
133
133
  cache[:script_emitted] = true
134
- script = '<script type="module">import "pgbus/stream_source_element"</script>'
134
+ nonce = content_security_policy_nonce if respond_to?(:content_security_policy_nonce)
135
+ nonce_attr = nonce ? %( nonce="#{CGI.escape_html(nonce)}") : ""
136
+ script = %(<script type="module"#{nonce_attr}>import "pgbus/stream_source_element"</script>)
135
137
  script.respond_to?(:html_safe) ? script.html_safe : script
136
138
  end
137
139
 
@@ -0,0 +1,37 @@
1
+ # frozen_string_literal: true
2
+
3
+ module Pgbus
4
+ class Client
5
+ # Fire-and-forget PG NOTIFY for ephemeral stream broadcasts. No PGMQ
6
+ # queue is created — the payload travels via the Postgres NOTIFY channel
7
+ # only, matching the channel naming convention that PGMQ's trigger uses:
8
+ # pgmq.q_<full_queue_name>.INSERT
9
+ #
10
+ # Subscribers already LISTEN on this channel via the Streamer's Listener.
11
+ # When a subscriber is connected, the StreamEventDispatcher receives the
12
+ # NOTIFY and fans out the payload. When no subscriber is connected,
13
+ # the NOTIFY is silently discarded by Postgres — no queue, no storage,
14
+ # no orphan tables.
15
+ #
16
+ # The payload is JSON-serialized into the NOTIFY's optional payload
17
+ # parameter (max 8000 bytes in Postgres). Broadcasts exceeding this
18
+ # limit will raise a PG::InvalidParameterValue ("payload string too long") error — callers needing
19
+ # large payloads should use durable mode (which inserts into PGMQ).
20
+ module NotifyStream
21
+ def notify_stream(stream_name, payload)
22
+ full_name = config.queue_name(stream_name)
23
+ sanitized = QueueNameValidator.sanitize!(full_name)
24
+ channel = "pgmq.q_#{sanitized}.INSERT"
25
+ json = payload.is_a?(String) ? payload : JSON.generate(payload)
26
+
27
+ Instrumentation.instrument("pgbus.stream.notify", stream: stream_name, bytes: json.bytesize) do
28
+ synchronized do
29
+ @pgmq.with_connection do |conn|
30
+ conn.exec_params("SELECT pg_notify($1, $2)", [channel, json])
31
+ end
32
+ end
33
+ end
34
+ end
35
+ end
36
+ end
37
+ end
data/lib/pgbus/client.rb CHANGED
@@ -3,11 +3,13 @@
3
3
  require "json"
4
4
  require_relative "client/read_after"
5
5
  require_relative "client/ensure_stream_queue"
6
+ require_relative "client/notify_stream"
6
7
 
7
8
  module Pgbus
8
9
  class Client
9
10
  include ReadAfter
10
11
  include EnsureStreamQueue
12
+ include NotifyStream
11
13
 
12
14
  attr_reader :pgmq, :config
13
15
 
@@ -104,7 +104,9 @@ module Pgbus
104
104
  :streams_default_retention, :streams_retention, :streams_heartbeat_interval,
105
105
  :streams_max_connections, :streams_idle_timeout, :streams_listen_health_check_ms,
106
106
  :streams_write_deadline_ms, :streams_falcon_streaming_body,
107
- :streams_stats_enabled, :streams_test_mode
107
+ :streams_stats_enabled, :streams_test_mode,
108
+ :streams_orphan_sweep_interval, :streams_orphan_threshold
109
+ attr_reader :streams_default_broadcast_mode # rubocop:disable Style/AccessorGrouping
108
110
 
109
111
  # AppSignal integration (auto-loaded when ::Appsignal is defined and this is true).
110
112
  # Set to false to opt out without uninstalling the appsignal gem.
@@ -216,6 +218,9 @@ module Pgbus
216
218
  # usually want job stats on and stream stats off, or vice versa.
217
219
  @streams_stats_enabled = false
218
220
  @streams_test_mode = false
221
+ @streams_default_broadcast_mode = :ephemeral
222
+ @streams_orphan_sweep_interval = 3600 # 1 hour
223
+ @streams_orphan_threshold = 86_400 # 24 hours
219
224
 
220
225
  # AppSignal: auto-on when the appsignal gem is loaded; probe runs in
221
226
  # the same process, so the operator can disable it independently.
@@ -264,6 +269,18 @@ module Pgbus
264
269
  end
265
270
  end
266
271
 
272
+ VALID_BROADCAST_MODES = %i[ephemeral durable].freeze
273
+
274
+ def streams_default_broadcast_mode=(mode)
275
+ mode = mode.to_sym
276
+ unless VALID_BROADCAST_MODES.include?(mode)
277
+ raise ArgumentError,
278
+ "Invalid streams_default_broadcast_mode: #{mode}. Must be one of: #{VALID_BROADCAST_MODES.join(", ")}"
279
+ end
280
+
281
+ @streams_default_broadcast_mode = mode
282
+ end
283
+
267
284
  VALID_PGMQ_SCHEMA_MODES = %i[auto extension embedded].freeze
268
285
 
269
286
  def pgmq_schema_mode=(mode)
@@ -343,6 +360,15 @@ module Pgbus
343
360
  end
344
361
 
345
362
  raise ArgumentError, "streams_retention must be a Hash" unless streams_retention.is_a?(Hash)
363
+
364
+ if streams_orphan_sweep_interval && !(streams_orphan_sweep_interval.is_a?(Numeric) && streams_orphan_sweep_interval.positive?)
365
+ raise ArgumentError, "streams_orphan_sweep_interval must be a positive number or nil to disable"
366
+ end
367
+
368
+ return if streams_orphan_threshold.nil?
369
+ return if streams_orphan_threshold.is_a?(Numeric) && streams_orphan_threshold.positive?
370
+
371
+ raise ArgumentError, "streams_orphan_threshold must be a positive number or nil to disable"
346
372
  end
347
373
 
348
374
  # Set the worker capsule list. Accepts:
@@ -15,6 +15,7 @@ module Pgbus
15
15
  OUTBOX_CLEANUP_INTERVAL = 3600 # Run outbox cleanup every hour
16
16
  JOB_LOCK_CLEANUP_INTERVAL = 300 # Run job lock cleanup every 5 minutes
17
17
  STATS_CLEANUP_INTERVAL = 3600 # Run stats cleanup every hour
18
+ ORPHAN_STREAM_SWEEP_INTERVAL = 3600 # Run orphan stream sweep every hour
18
19
  TABLE_MAINTENANCE_INTERVAL = Pgbus::TableMaintenance::MAINTENANCE_INTERVAL
19
20
 
20
21
  # Page size for archive compaction. Each cycle deletes up to this
@@ -38,6 +39,7 @@ module Pgbus
38
39
  @last_outbox_cleanup_at = monotonic_now
39
40
  @last_job_lock_cleanup_at = monotonic_now
40
41
  @last_stats_cleanup_at = monotonic_now
42
+ @last_orphan_stream_sweep_at = monotonic_now
41
43
  @last_table_maintenance_at = monotonic_now
42
44
  end
43
45
 
@@ -86,6 +88,8 @@ module Pgbus
86
88
  run_if_due(now, :@last_outbox_cleanup_at, OUTBOX_CLEANUP_INTERVAL) { cleanup_outbox }
87
89
  run_if_due(now, :@last_job_lock_cleanup_at, JOB_LOCK_CLEANUP_INTERVAL) { cleanup_job_locks }
88
90
  run_if_due(now, :@last_stats_cleanup_at, STATS_CLEANUP_INTERVAL) { cleanup_stats }
91
+ sweep_interval = config.streams_orphan_sweep_interval
92
+ run_if_due(now, :@last_orphan_stream_sweep_at, sweep_interval) { sweep_orphan_streams } if sweep_interval
89
93
  run_if_due(now, :@last_table_maintenance_at, TABLE_MAINTENANCE_INTERVAL) { run_table_maintenance }
90
94
  end
91
95
 
@@ -315,6 +319,40 @@ module Pgbus
315
319
  config.streams_default_retention.to_f
316
320
  end
317
321
 
322
+ def sweep_orphan_streams
323
+ prefix = config.streams_queue_prefix
324
+ return if prefix.nil? || prefix.empty?
325
+
326
+ threshold = config.streams_orphan_threshold
327
+ return unless threshold
328
+
329
+ conn = config.connects_to ? Pgbus::BusRecord.connection : ActiveRecord::Base.connection
330
+ queue_names = conn.select_values("SELECT queue_name FROM pgmq.meta ORDER BY queue_name")
331
+
332
+ dropped = 0
333
+ queue_names.each do |full_name|
334
+ next unless full_name.start_with?("#{prefix}_")
335
+
336
+ row = conn.select_one(<<~SQL, "Pgbus Orphan Check")
337
+ SELECT count(*) AS queue_length
338
+ FROM pgmq.q_#{QueueNameValidator.sanitize!(full_name)}
339
+ SQL
340
+
341
+ next unless row
342
+ next if row["queue_length"].to_i.positive?
343
+
344
+ Pgbus.client.drop_queue(full_name, prefixed: false)
345
+ dropped += 1
346
+ Pgbus.logger.info { "[Pgbus] Dropped orphan stream queue: #{full_name}" }
347
+ rescue StandardError => e
348
+ Pgbus.logger.warn { "[Pgbus] Orphan stream sweep failed for #{full_name}: #{e.message}" }
349
+ end
350
+
351
+ Pgbus.logger.debug { "[Pgbus] Orphan stream sweep complete: dropped #{dropped} queue(s)" } if dropped.positive?
352
+ rescue StandardError => e
353
+ Pgbus.logger.warn { "[Pgbus] Orphan stream sweep failed: #{e.message}" }
354
+ end
355
+
318
356
  def cleanup_recurring_executions
319
357
  retention = config.recurring_execution_retention
320
358
  return unless retention&.positive?
@@ -36,7 +36,8 @@ module Pgbus
36
36
  module TurboBroadcastable
37
37
  def broadcast_stream_to(*streamables, content:)
38
38
  name = stream_name_from(streamables)
39
- Pgbus.stream(name).broadcast(content)
39
+ mode = Pgbus.configuration.streams_default_broadcast_mode
40
+ Pgbus.stream(name, durable: mode == :durable).broadcast(content)
40
41
  end
41
42
  end
42
43
 
data/lib/pgbus/streams.rb CHANGED
@@ -37,14 +37,19 @@ module Pgbus
37
37
  class Stream
38
38
  attr_reader :name
39
39
 
40
- def initialize(streamables, client: Pgbus.client)
40
+ def initialize(streamables, client: Pgbus.client, durable: true)
41
41
  @name = self.class.name_from(streamables)
42
42
  self.class.validate_name_length!(@name, streamables)
43
43
  @client = client
44
+ @durable = durable
44
45
  @ensured = false
45
46
  @ensure_mutex = Mutex.new
46
47
  end
47
48
 
49
+ def durable?
50
+ @durable
51
+ end
52
+
48
53
  # Broadcasts a Turbo Stream HTML payload through the pgbus streamer.
49
54
  # PGMQ's `message` column is JSONB, so raw HTML strings can't be passed
50
55
  # directly. We wrap as `{"html": "..."}` on the way in and unwrap in
@@ -69,9 +74,12 @@ module Pgbus
69
74
  # through PGMQ; the predicate itself lives in-process on the
70
75
  # subscriber side and is evaluated per-connection by the Dispatcher.
71
76
  def broadcast(payload, visible_to: nil)
72
- ensure_queue!
73
77
  wrapped = { "html" => payload.to_s }
74
78
  wrapped["visible_to"] = visible_to.to_s if visible_to
79
+
80
+ return broadcast_ephemeral(wrapped) unless @durable
81
+
82
+ ensure_queue!
75
83
  transaction = current_open_transaction
76
84
  instrument_payload = {
77
85
  stream: @name,
@@ -155,6 +163,11 @@ module Pgbus
155
163
 
156
164
  private
157
165
 
166
+ def broadcast_ephemeral(wrapped)
167
+ @client.notify_stream(@name, wrapped)
168
+ nil
169
+ end
170
+
158
171
  def ensure_queue!
159
172
  return if @ensured
160
173
 
data/lib/pgbus/version.rb CHANGED
@@ -1,5 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  module Pgbus
4
- VERSION = "0.7.9"
4
+ VERSION = "0.8.0"
5
5
  end
@@ -43,11 +43,10 @@ module Pgbus
43
43
  # different connection lifecycle than the worker processes).
44
44
  def queues_with_metrics
45
45
  queue_names = connection.select_values("SELECT queue_name FROM pgmq.meta ORDER BY queue_name")
46
- # paused_queue_names returns an Array; convert to Set so the
47
- # per-queue membership check is O(1). With 100+ queues the
48
- # Array#include? cost in the loop was O(n²) per dashboard load.
46
+ return [] if queue_names.empty?
47
+
49
48
  paused_queues = paused_queue_names.to_set
50
- queue_names.map { |name| queue_metrics_via_sql(name) }.compact.map do |q|
49
+ batched_queue_metrics(queue_names).map do |q|
51
50
  q.merge(paused: paused_queues.include?(logical_queue_name(q[:name])))
52
51
  end
53
52
  rescue StandardError => e
@@ -1008,6 +1007,46 @@ module Pgbus
1008
1007
  rows.to_a.map { |r| format_message(r, r["queue_name"]) }
1009
1008
  end
1010
1009
 
1010
+ def batched_queue_metrics(queue_names)
1011
+ return [] if queue_names.empty?
1012
+
1013
+ unions = queue_names.filter_map do |name|
1014
+ sanitized = sanitize_name(name)
1015
+ qtable = "q_#{sanitized}"
1016
+ seq_name = "#{qtable}_msg_id_seq"
1017
+ <<~SQL
1018
+ SELECT
1019
+ #{connection.quote(name)} AS queue_name,
1020
+ (SELECT count(*) FROM pgmq.#{qtable}) AS queue_length,
1021
+ (SELECT count(*) FROM pgmq.#{qtable} WHERE vt <= NOW()) AS queue_visible_length,
1022
+ (SELECT EXTRACT(epoch FROM (NOW() - max(enqueued_at)))::int FROM pgmq.#{qtable}) AS newest_msg_age_sec,
1023
+ (SELECT EXTRACT(epoch FROM (NOW() - min(enqueued_at)))::int FROM pgmq.#{qtable}) AS oldest_msg_age_sec,
1024
+ (SELECT CASE WHEN is_called THEN last_value ELSE 0 END FROM pgmq.#{seq_name}) AS total_messages
1025
+ SQL
1026
+ rescue StandardError => e
1027
+ Pgbus.logger.debug { "[Pgbus::Web] Skipping queue metrics for #{name}: #{e.message}" }
1028
+ nil
1029
+ end
1030
+
1031
+ return [] if unions.empty?
1032
+
1033
+ sql = unions.join(" UNION ALL ")
1034
+ rows = connection.select_all(sql, "Pgbus Batched Queue Metrics")
1035
+ rows.to_a.map do |row|
1036
+ {
1037
+ name: row["queue_name"],
1038
+ queue_length: row["queue_length"].to_i,
1039
+ queue_visible_length: row["queue_visible_length"].to_i,
1040
+ oldest_msg_age_sec: row["oldest_msg_age_sec"]&.to_i,
1041
+ newest_msg_age_sec: row["newest_msg_age_sec"]&.to_i,
1042
+ total_messages: row["total_messages"].to_i
1043
+ }
1044
+ end
1045
+ rescue StandardError => e
1046
+ Pgbus.logger.error { "[Pgbus::Web] Error fetching batched queue metrics: #{e.class}: #{e.message}" }
1047
+ []
1048
+ end
1049
+
1011
1050
  def queue_metrics_via_sql(queue_name)
1012
1051
  qtable = "q_#{sanitize_name(queue_name)}"
1013
1052
  seq_name = "#{qtable}_msg_id_seq"
@@ -29,7 +29,11 @@ module Pgbus
29
29
  # For a queue named `pgbus_stream_chat` the trigger table is
30
30
  # `q_pgbus_stream_chat`, so the channel is `pgmq.q_pgbus_stream_chat.INSERT`.
31
31
  class Listener
32
- WakeMessage = Data.define(:queue_name)
32
+ WakeMessage = Data.define(:queue_name, :payload) do
33
+ def initialize(queue_name:, payload: nil)
34
+ super
35
+ end
36
+ end
33
37
 
34
38
  CHANNEL_PREFIX = "pgmq.q_"
35
39
  CHANNEL_SUFFIX = ".INSERT"
@@ -110,8 +114,8 @@ module Pgbus
110
114
 
111
115
  timeout_s = @health_check_ms / 1000.0
112
116
  begin
113
- @conn.wait_for_notify(timeout_s) do |channel, _pid, _payload|
114
- handle_notify(channel)
117
+ @conn.wait_for_notify(timeout_s) do |channel, _pid, payload|
118
+ handle_notify(channel, payload)
115
119
  end || run_health_check
116
120
  rescue IOError => e
117
121
  # #stop closes the PG connection to interrupt
@@ -182,11 +186,11 @@ module Pgbus
182
186
  @listening_to.delete(channel)
183
187
  end
184
188
 
185
- def handle_notify(channel)
189
+ def handle_notify(channel, payload = nil)
186
190
  queue_name = queue_name_from(channel)
187
191
  return unless queue_name
188
192
 
189
- @dispatch_queue << WakeMessage.new(queue_name: queue_name)
193
+ @dispatch_queue << WakeMessage.new(queue_name: queue_name, payload: payload)
190
194
  end
191
195
 
192
196
  def run_health_check
@@ -92,6 +92,7 @@ module Pgbus
92
92
  # boolean assignment), the sentinel break would still fire.
93
93
  @running = false
94
94
  @thread = nil
95
+ @ephemeral_seq = 0
95
96
  end
96
97
 
97
98
  def start
@@ -136,7 +137,7 @@ module Pgbus
136
137
  # queue's current contents — once we hit a non-Wake or a
137
138
  # different stream, we stop and let the regular path handle
138
139
  # the rest.
139
- if msg.is_a?(WakeMessage)
140
+ if msg.is_a?(WakeMessage) && msg.payload.nil?
140
141
  wakes, trailing = drain_wakes_for(msg)
141
142
  wakes.each { |w| handle(w) }
142
143
  handle(trailing) if trailing
@@ -165,7 +166,7 @@ module Pgbus
165
166
  return [coalesced, nil] # queue drained
166
167
  end
167
168
 
168
- return [coalesced, peek] unless peek.is_a?(WakeMessage)
169
+ return [coalesced, peek] unless peek.is_a?(WakeMessage) && peek.payload.nil?
169
170
 
170
171
  next if seen.include?(peek.queue_name)
171
172
 
@@ -193,32 +194,26 @@ module Pgbus
193
194
 
194
195
  def handle_wake(msg)
195
196
  started_at = monotonic_ms
196
- # msg.queue_name is the PGMQ full table name (pgbus_int_pbns_xxx),
197
- # but connections are registered under the logical name (pbns_xxx).
198
- # Translate before looking up.
199
197
  stream = @full_to_logical[msg.queue_name] || msg.queue_name
200
198
  registered = @registry.connections_for(stream)
201
199
  in_flight_pairs = @in_flight[stream]
202
200
  return if registered.empty? && in_flight_pairs.empty?
203
201
 
202
+ if msg.payload
203
+ handle_ephemeral_wake(msg, stream, registered, in_flight_pairs, started_at)
204
+ else
205
+ handle_durable_wake(stream, registered, in_flight_pairs, started_at)
206
+ end
207
+ end
208
+
209
+ def handle_durable_wake(stream, registered, in_flight_pairs, started_at)
204
210
  min_seen = minimum_cursor(registered, in_flight_pairs)
205
211
  raw_envelopes = @client.read_after(stream, after_id: min_seen, limit: @read_limit)
206
212
  return if raw_envelopes.empty?
207
213
 
208
214
  envelopes = raw_envelopes.map { |e| unwrap_stream_envelope(e) }
209
- # The maximum msg_id in THIS batch. We advance every
210
- # connection's scanned cursor past this value even if the
211
- # filter drops everything — otherwise a 500-message run
212
- # of invisible broadcasts would pin minimum_cursor and
213
- # the dispatcher would re-read the same window forever,
214
- # starving later public messages. Connection#enqueue still
215
- # gates the client-facing cursor on actual successful
216
- # writes, so this advance is invisible to clients.
217
215
  max_msg_id = envelopes.map(&:msg_id).max
218
216
 
219
- # Each connection gets a per-connection filtered subset. We
220
- # can't pre-filter once because different connections have
221
- # different authorize contexts.
222
217
  registered.each do |conn|
223
218
  safe_enqueue(conn, visible_envelopes_for(envelopes, conn))
224
219
  advance_scanned_cursor(conn, max_msg_id)
@@ -230,11 +225,40 @@ module Pgbus
230
225
 
231
226
  prune_dead(registered)
232
227
 
233
- # Record one stat row per wake. Fanout is the number of
234
- # subscribers (registered + in-flight) that received the
235
- # broadcast before any filter dropped it — the "intended"
236
- # audience size, which is the useful operator number even
237
- # when audience filtering is in play.
228
+ record_stat(
229
+ stream_name: stream,
230
+ event_type: "broadcast",
231
+ started_at: started_at,
232
+ fanout: registered.size + in_flight_pairs.size
233
+ )
234
+ end
235
+
236
+ def handle_ephemeral_wake(msg, stream, registered, in_flight_pairs, started_at)
237
+ parsed = JSON.parse(msg.payload)
238
+ html = parsed.is_a?(Hash) ? parsed["html"] : nil
239
+ return unless html.is_a?(String)
240
+
241
+ visible_to = parsed["visible_to"]
242
+ visible_to = visible_to.to_sym if visible_to.is_a?(String)
243
+
244
+ @ephemeral_seq += 1
245
+ envelope = StreamEnvelope.new(
246
+ msg_id: -@ephemeral_seq,
247
+ enqueued_at: Time.now.utc.iso8601(6),
248
+ payload: html,
249
+ source: "ephemeral",
250
+ visible_to: visible_to
251
+ )
252
+
253
+ registered.each do |conn|
254
+ safe_enqueue(conn, visible_envelopes_for([envelope], conn))
255
+ end
256
+ in_flight_pairs.each do |(conn, buffer)|
257
+ buffer.concat(visible_envelopes_for([envelope], conn))
258
+ end
259
+
260
+ prune_dead(registered)
261
+
238
262
  record_stat(
239
263
  stream_name: stream,
240
264
  event_type: "broadcast",
data/lib/pgbus.rb CHANGED
@@ -104,10 +104,11 @@ module Pgbus
104
104
  # clears it. The cache key is the resolved name string, not the raw
105
105
  # streamables, so `Pgbus.stream(@order)` and `Pgbus.stream(@order)`
106
106
  # in the same process return the same instance.
107
- def stream(streamables)
107
+ def stream(streamables, durable: true)
108
108
  name = Streams::Stream.name_from(streamables)
109
+ cache_key = "#{name}:#{durable ? "d" : "e"}"
109
110
  @stream_cache ||= Concurrent::Map.new
110
- @stream_cache.compute_if_absent(name) { Streams::Stream.new(streamables) }
111
+ @stream_cache.compute_if_absent(cache_key) { Streams::Stream.new(streamables, durable: durable) }
111
112
  end
112
113
 
113
114
  # Compose a short, pgbus-safe stream identifier from any mix of
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: pgbus
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.7.9
4
+ version: 0.8.0
5
5
  platform: ruby
6
6
  authors:
7
7
  - Mikael Henriksson
@@ -250,6 +250,7 @@ files:
250
250
  - lib/pgbus/cli.rb
251
251
  - lib/pgbus/client.rb
252
252
  - lib/pgbus/client/ensure_stream_queue.rb
253
+ - lib/pgbus/client/notify_stream.rb
253
254
  - lib/pgbus/client/read_after.rb
254
255
  - lib/pgbus/concurrency.rb
255
256
  - lib/pgbus/concurrency/blocked_execution.rb