pgbus 0.5.1 → 0.6.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. checksums.yaml +4 -4
  2. data/README.md +244 -1
  3. data/Rakefile +8 -1
  4. data/app/controllers/pgbus/insights_controller.rb +6 -0
  5. data/app/helpers/pgbus/streams_helper.rb +115 -0
  6. data/app/javascript/pgbus/stream_source_element.js +212 -0
  7. data/app/models/pgbus/stream_stat.rb +123 -0
  8. data/app/views/pgbus/insights/show.html.erb +59 -0
  9. data/config/locales/en.yml +16 -0
  10. data/config/routes.rb +11 -0
  11. data/lib/generators/pgbus/add_job_stats_queue_index_generator.rb +53 -0
  12. data/lib/generators/pgbus/add_presence_generator.rb +55 -0
  13. data/lib/generators/pgbus/add_stream_stats_generator.rb +54 -0
  14. data/lib/generators/pgbus/templates/add_job_stats_latency.rb.erb +4 -1
  15. data/lib/generators/pgbus/templates/add_job_stats_queue_index.rb.erb +11 -0
  16. data/lib/generators/pgbus/templates/add_presence.rb.erb +26 -0
  17. data/lib/generators/pgbus/templates/add_stream_stats.rb.erb +18 -0
  18. data/lib/generators/pgbus/update_generator.rb +176 -23
  19. data/lib/pgbus/client/ensure_stream_queue.rb +54 -0
  20. data/lib/pgbus/client/read_after.rb +100 -0
  21. data/lib/pgbus/client.rb +6 -0
  22. data/lib/pgbus/configuration.rb +65 -0
  23. data/lib/pgbus/engine.rb +31 -0
  24. data/lib/pgbus/generators/config_converter.rb +22 -2
  25. data/lib/pgbus/generators/database_target_detector.rb +94 -0
  26. data/lib/pgbus/generators/migration_detector.rb +217 -0
  27. data/lib/pgbus/process/dispatcher.rb +62 -4
  28. data/lib/pgbus/streams/cursor.rb +71 -0
  29. data/lib/pgbus/streams/envelope.rb +58 -0
  30. data/lib/pgbus/streams/filters.rb +98 -0
  31. data/lib/pgbus/streams/presence.rb +216 -0
  32. data/lib/pgbus/streams/signed_name.rb +69 -0
  33. data/lib/pgbus/streams/turbo_broadcastable.rb +53 -0
  34. data/lib/pgbus/streams/watermark_cache_middleware.rb +28 -0
  35. data/lib/pgbus/streams.rb +151 -0
  36. data/lib/pgbus/version.rb +1 -1
  37. data/lib/pgbus/web/data_source.rb +88 -10
  38. data/lib/pgbus/web/stream_app.rb +179 -0
  39. data/lib/pgbus/web/streamer/connection.rb +122 -0
  40. data/lib/pgbus/web/streamer/dispatcher.rb +467 -0
  41. data/lib/pgbus/web/streamer/heartbeat.rb +105 -0
  42. data/lib/pgbus/web/streamer/instance.rb +176 -0
  43. data/lib/pgbus/web/streamer/io_writer.rb +73 -0
  44. data/lib/pgbus/web/streamer/listener.rb +228 -0
  45. data/lib/pgbus/web/streamer/registry.rb +103 -0
  46. data/lib/pgbus/web/streamer.rb +53 -0
  47. data/lib/pgbus.rb +28 -0
  48. data/lib/puma/plugin/pgbus_streams.rb +54 -0
  49. data/lib/tasks/pgbus_streams.rake +52 -0
  50. metadata +33 -1
@@ -2,58 +2,171 @@
2
2
 
3
3
  require "rails/generators"
4
4
  require "pgbus/generators/config_converter"
5
+ require "pgbus/generators/migration_detector"
6
+ require "pgbus/generators/database_target_detector"
5
7
 
6
8
  module Pgbus
7
9
  module Generators
8
- # Converts an existing config/pgbus.yml to a Ruby initializer at
9
- # config/initializers/pgbus.rb using the modern DSL.
10
+ # Upgrade command with two independent jobs:
10
11
  #
11
- # The original YAML file is left in place — the user reviews the
12
- # generated initializer and deletes the YAML when ready.
12
+ # 1. Config conversion: if config/pgbus.yml exists, convert it to
13
+ # config/initializers/pgbus.rb using the modern Ruby DSL. Skip
14
+ # silently if the initializer already exists or the YAML is
15
+ # absent — safe to re-run.
16
+ #
17
+ # 2. Migration detection: inspect the live database and add any
18
+ # missing pgbus migrations to db/migrate (or db/pgbus_migrate
19
+ # if a separate database is configured). Invokes each matching
20
+ # sub-generator in-process via Thor's invoke, so this mirrors
21
+ # what the user would get running each generator by hand.
13
22
  #
14
23
  # Usage:
15
24
  #
16
25
  # bin/rails generate pgbus:update
17
- # bin/rails generate pgbus:update --force # overwrite existing initializer
18
- # bin/rails generate pgbus:update --source=path/to/pgbus.yml
26
+ # bin/rails generate pgbus:update --dry-run
27
+ # bin/rails generate pgbus:update --skip-config
28
+ # bin/rails generate pgbus:update --skip-migrations
29
+ # bin/rails generate pgbus:update --database=pgbus
30
+ # bin/rails generate pgbus:update --quiet
19
31
  class UpdateGenerator < Rails::Generators::Base
20
- desc "Convert config/pgbus.yml to config/initializers/pgbus.rb using the Ruby DSL"
32
+ desc "Upgrade pgbus: convert YAML config + add any missing migrations"
21
33
 
22
34
  class_option :source,
23
35
  type: :string,
24
36
  default: "config/pgbus.yml",
25
- desc: "Path to the existing YAML config (default: config/pgbus.yml)"
37
+ desc: "Path to an existing YAML config to convert (default: config/pgbus.yml)"
26
38
 
27
39
  class_option :destination,
28
40
  type: :string,
29
41
  default: "config/initializers/pgbus.rb",
30
42
  desc: "Path to the generated initializer (default: config/initializers/pgbus.rb)"
31
43
 
32
- def convert
44
+ class_option :skip_config,
45
+ type: :boolean,
46
+ default: false,
47
+ desc: "Skip the YAML → Ruby initializer conversion step"
48
+
49
+ class_option :skip_migrations,
50
+ type: :boolean,
51
+ default: false,
52
+ desc: "Skip the migration detection step"
53
+
54
+ class_option :database,
55
+ type: :string,
56
+ default: nil,
57
+ desc: "Use a separate database for pgbus tables (default: auto-detect " \
58
+ "from Pgbus.configuration.connects_to or config/initializers/pgbus.rb)"
59
+
60
+ class_option :dry_run,
61
+ type: :boolean,
62
+ default: false,
63
+ desc: "Print what would be done without creating any files"
64
+
65
+ class_option :quiet,
66
+ type: :boolean,
67
+ default: false,
68
+ desc: "Suppress verbose per-step output"
69
+
70
+ def convert_yaml_if_present
71
+ return if options[:skip_config]
72
+
33
73
  source_path = File.expand_path(options[:source], destination_root)
34
74
  destination_path = File.expand_path(options[:destination], destination_root)
35
75
 
36
- # Thor::Error is the idiomatic way to abort a Rails generator. Thor
37
- # catches it, prints the message in red, and exits with status 1
38
- # without a Ruby backtrace. exit 1 would skip the framework's
39
- # cleanup hooks and is hard to test.
40
- raise Thor::Error, "Source file not found: #{options[:source]}" unless File.exist?(source_path)
76
+ unless File.exist?(source_path)
77
+ log "YAML config not found at #{options[:source]}; skipping config conversion."
78
+ return
79
+ end
80
+
81
+ if File.exist?(destination_path)
82
+ log "Initializer already exists at #{options[:destination]}; skipping config conversion."
83
+ return
84
+ end
41
85
 
42
86
  ruby_source = load_and_convert(source_path)
43
- create_file destination_path, ruby_source
87
+ if options[:dry_run]
88
+ log_change "[dry-run] would create #{options[:destination]}"
89
+ else
90
+ create_file destination_path, ruby_source
91
+ end
92
+ end
93
+
94
+ def detect_and_install_missing_migrations
95
+ return if options[:skip_migrations]
96
+
97
+ unless active_record_available?
98
+ log "ActiveRecord not loaded — skipping migration detection. Run this generator from a Rails app."
99
+ return
100
+ end
101
+
102
+ connection = resolve_connection
103
+ unless connection
104
+ log "No ActiveRecord connection available — skipping migration detection."
105
+ return
106
+ end
107
+
108
+ detector = MigrationDetector.new(connection)
109
+ missing = detector.missing_migrations
110
+
111
+ if missing.empty?
112
+ log "Database schema is up to date — no migrations needed."
113
+ return
114
+ end
115
+
116
+ if missing == [MigrationDetector::FRESH_INSTALL]
117
+ say ""
118
+ say "Database looks empty of pgbus tables — this is a fresh install.", :yellow
119
+ say "Run `rails generate pgbus:install` instead of `pgbus:update`.", :yellow
120
+ say ""
121
+ return
122
+ end
123
+
124
+ database_name = options[:database] || detected_database_name
125
+ log "Auto-detected separate database: #{database_name}" if options[:database].nil? && database_name
126
+
127
+ log "Found #{missing.size} missing migration(s):"
128
+ missing.each do |key|
129
+ description = MigrationDetector::DESCRIPTIONS[key] || key.to_s
130
+ log " - #{key}: #{description}"
131
+ end
132
+
133
+ # Two loops on purpose: print the full plan first so operators
134
+ # see what's coming, then execute. Combining would interleave
135
+ # " - add_presence: foo" with "Invoking pgbus:add_presence..."
136
+ # which hides the shape of the upgrade from the reader.
137
+ missing.each do |key| # rubocop:disable Style/CombinableLoops
138
+ generator = MigrationDetector::GENERATOR_MAP[key]
139
+ unless generator
140
+ say " ! no generator mapped for #{key}, skipping", :red
141
+ next
142
+ end
143
+
144
+ if options[:dry_run]
145
+ log_change "[dry-run] would invoke #{generator}#{" --database=#{database_name}" if database_name}"
146
+ next
147
+ end
148
+
149
+ invoke_args = []
150
+ invoke_args << "--database=#{database_name}" if database_name
151
+ log "Invoking #{generator}#{" --database=#{database_name}" if database_name}..."
152
+ invoke generator, invoke_args
153
+ end
44
154
  end
45
155
 
46
156
  def display_post_install
157
+ return if options[:quiet]
158
+
47
159
  say ""
48
- say "Pgbus initializer generated at #{options[:destination]}!", :green
49
- say ""
50
- say "Next steps:"
51
- say " 1. Review the generated initializer for correctness"
52
- say " 2. Boot your app and verify everything still works"
53
- say " 3. Delete #{options[:source]} when satisfied (Pgbus will stop reading it)"
160
+ say "Pgbus update complete.", :green
54
161
  say ""
55
- say "If you spot a setting that didn't translate cleanly, please open an issue:"
56
- say " https://github.com/mhenrixon/pgbus/issues", :cyan
162
+ if options[:dry_run]
163
+ say "Dry-run: no files were created.", :yellow
164
+ else
165
+ say "Next steps:"
166
+ say " 1. Review the generated migration files in db/migrate (or db/pgbus_migrate)"
167
+ say " 2. Run: rails db:migrate#{":#{effective_database_name}" if effective_database_name}"
168
+ say " 3. Restart pgbus: bin/pgbus start"
169
+ end
57
170
  say ""
58
171
  end
59
172
 
@@ -70,6 +183,46 @@ module Pgbus
70
183
  rescue ConfigConverter::Error, Psych::Exception, Errno::ENOENT, Errno::EACCES => e
71
184
  raise Thor::Error, "Failed to convert #{options[:source]}: #{e.message}"
72
185
  end
186
+
187
+ def active_record_available?
188
+ defined?(::ActiveRecord::Base) && ::ActiveRecord::Base.respond_to?(:connection)
189
+ end
190
+
191
+ # Resolve the AR connection to inspect. If pgbus is configured to
192
+ # use a separate database (via connects_to), use BusRecord's
193
+ # connection so the detector probes the right schema.
194
+ def resolve_connection
195
+ if defined?(Pgbus) && Pgbus.respond_to?(:configuration) && Pgbus.configuration.connects_to
196
+ Pgbus::BusRecord.connection
197
+ else
198
+ ::ActiveRecord::Base.connection
199
+ end
200
+ rescue StandardError => e
201
+ say " ! could not resolve AR connection: #{e.class}: #{e.message}", :red
202
+ nil
203
+ end
204
+
205
+ def detected_database_name
206
+ @detected_database_name ||= DatabaseTargetDetector.new(
207
+ destination_root: destination_root
208
+ ).detect
209
+ end
210
+
211
+ def effective_database_name
212
+ options[:database] || detected_database_name
213
+ end
214
+
215
+ def log(message)
216
+ return if options[:quiet]
217
+
218
+ say message
219
+ end
220
+
221
+ def log_change(message)
222
+ return if options[:quiet]
223
+
224
+ say message, :yellow
225
+ end
73
226
  end
74
227
  end
75
228
  end
@@ -0,0 +1,54 @@
# frozen_string_literal: true

module Pgbus
  class Client
    # Idempotent, per-process setup for a stream-backed PGMQ queue.
    #
    # Three steps, all safe to repeat:
    #   1. Create the queue via `ensure_queue` (which already handles
    #      schema bootstrap and de-duplication).
    #   2. Drop PGMQ's NOTIFY throttle to 0 for this queue so every
    #      broadcast fires its own NOTIFY instead of being coalesced.
    #   3. Add a `msg_id` index on the PGMQ archive table
    #      (`pgmq.a_<name>`), which ships with only an `archived_at`
    #      index by default. `Client#read_after`'s replay query filters
    #      by `WHERE msg_id > $1` and would degrade to a sequential scan
    #      once the archive grows past a few thousand rows; the index is
    #      scoped to stream queues so chat-history-style retention does
    #      not hit a performance cliff.
    #
    # Called from `Pgbus.stream(name).broadcast(...)` on first publish
    # per stream and from the streamer on first subscription per stream.
    module EnsureStreamQueue
      def ensure_stream_queue(stream_name)
        ensure_queue(stream_name)
        full_name = config.queue_name(stream_name)

        # PGMQ's default NOTIFY throttle is 250ms, tuned to coalesce
        # high-frequency worker queue inserts. Streams are latency-
        # sensitive and need every broadcast to fire a NOTIFY, even when
        # several land within the same millisecond, so override the
        # throttle to 0 for stream queues only.
        if config.listen_notify
          synchronized { @pgmq.enable_notify_insert(full_name, throttle_interval_ms: 0) }
        end

        create_archive_msg_id_index(stream_name, full_name)
      end

      private

      # CREATE INDEX IF NOT EXISTS is idempotent in Postgres, but each
      # call still costs a roundtrip plus a brief ACCESS SHARE lock on
      # the archive table. Broadcast-per-after_commit loops can hit the
      # same stream 1000x/sec, so memoize per process after the first
      # successful creation.
      def create_archive_msg_id_index(stream_name, full_name)
        return if @stream_indexes_created[stream_name]

        sanitized = QueueNameValidator.sanitize!(full_name)
        sql = <<~SQL
          CREATE INDEX IF NOT EXISTS a_#{sanitized}_msg_id_idx
          ON pgmq.a_#{sanitized} (msg_id)
        SQL

        synchronized do
          with_raw_connection { |conn| conn.exec(sql) }
        end

        @stream_indexes_created[stream_name] = true
      end
    end
  end
end
@@ -0,0 +1,100 @@
# frozen_string_literal: true

module Pgbus
  class Client
    # Non-consuming peek across a stream's PGMQ live (`q_`) and archive
    # (`a_`) tables. Used exclusively by `Pgbus::Web::Streamer` for SSE
    # replay; workers keep using `read_batch` (claim semantics), so the
    # two read paths are disjoint.
    #
    # The cursor is the highest msg_id the client has already seen:
    # replay returns everything strictly greater, ordered by msg_id ASC
    # and capped by `limit`.
    module ReadAfter
      Envelope = Data.define(:msg_id, :enqueued_at, :payload, :source)

      DEFAULT_LIMIT = 500

      # Returns an Array of Envelope, oldest first, for every message
      # with msg_id strictly greater than `after_id`.
      def read_after(stream_name, after_id:, limit: DEFAULT_LIMIT)
        table = sanitized_queue(stream_name)
        query = build_read_after_sql(table)
        params = [after_id.to_i, limit.to_i]

        rows = synchronized do
          with_raw_connection { |conn| conn.exec_params(query, params).to_a }
        end

        rows.map { |row| build_envelope(row) }
      end

      # Highest msg_id currently in the live queue (0 when empty).
      def stream_current_msg_id(stream_name)
        table = sanitized_queue(stream_name)
        query = "SELECT COALESCE(MAX(msg_id), 0) AS max FROM pgmq.q_#{table}"
        synchronized do
          with_raw_connection { |conn| conn.exec(query).first.fetch("max").to_i }
        end
      end

      # Smallest msg_id across live + archive, or nil when both are empty.
      def stream_oldest_msg_id(stream_name)
        table = sanitized_queue(stream_name)
        query = <<~SQL
          SELECT LEAST(
            (SELECT MIN(msg_id) FROM pgmq.q_#{table}),
            (SELECT MIN(msg_id) FROM pgmq.a_#{table})
          ) AS least
        SQL
        synchronized do
          with_raw_connection do |conn|
            raw = conn.exec(query).first.fetch("least")
            raw&.to_i
          end
        end
      end

      private

      # Union of live and archive rows. Each subquery is individually
      # ordered and limited, and the outer ORDER BY + LIMIT guarantees at
      # most `limit` rows total even when both subqueries fill up. The
      # 'live'/'archive' source column records which table a row came
      # from; the streamer doesn't branch on it today, but it makes
      # archive-replay misbehavior straightforward to diagnose.
      def build_read_after_sql(sanitized)
        <<~SQL
          (
            SELECT msg_id, enqueued_at, message, 'live'::text AS source
            FROM pgmq.q_#{sanitized}
            WHERE msg_id > $1
            ORDER BY msg_id ASC
            LIMIT $2
          )
          UNION ALL
          (
            SELECT msg_id, enqueued_at, message, 'archive'::text AS source
            FROM pgmq.a_#{sanitized}
            WHERE msg_id > $1
            ORDER BY msg_id ASC
            LIMIT $2
          )
          ORDER BY msg_id ASC
          LIMIT $2
        SQL
      end

      def sanitized_queue(stream_name)
        QueueNameValidator.sanitize!(config.queue_name(stream_name))
      end

      def build_envelope(row)
        Envelope.new(
          msg_id: row.fetch("msg_id").to_i,
          enqueued_at: row.fetch("enqueued_at"),
          payload: row.fetch("message"),
          source: row.fetch("source")
        )
      end
    end
  end
end
data/lib/pgbus/client.rb CHANGED
@@ -1,9 +1,14 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  require "json"
4
+ require_relative "client/read_after"
5
+ require_relative "client/ensure_stream_queue"
4
6
 
5
7
  module Pgbus
6
8
  class Client
9
+ include ReadAfter
10
+ include EnsureStreamQueue
11
+
7
12
  attr_reader :pgmq, :config
8
13
 
9
14
  PGMQ_REQUIRE_MUTEX = Mutex.new
@@ -49,6 +54,7 @@ module Pgbus
49
54
  end
50
55
 
51
56
  @queues_created = Concurrent::Map.new
57
+ @stream_indexes_created = Concurrent::Map.new
52
58
  @queue_strategy = QueueFactory.for(config)
53
59
  @schema_ensured = false
54
60
  end
@@ -84,6 +84,13 @@ module Pgbus
84
84
  attr_accessor :web_auth, :web_refresh_interval, :web_per_page, :web_live_updates, :web_data_source,
85
85
  :insights_default_minutes, :base_controller_class, :return_to_app_url
86
86
 
87
+ # Streams (turbo-rails replacement, SSE-based)
88
+ attr_accessor :streams_enabled, :streams_queue_prefix, :streams_signed_name_secret,
89
+ :streams_default_retention, :streams_retention, :streams_heartbeat_interval,
90
+ :streams_max_connections, :streams_idle_timeout, :streams_listen_health_check_ms,
91
+ :streams_write_deadline_ms, :streams_falcon_streaming_body,
92
+ :streams_stats_enabled
93
+
87
94
  def initialize
88
95
  @database_url = nil
89
96
  @connection_params = nil
@@ -150,6 +157,34 @@ module Pgbus
150
157
  @insights_default_minutes = 30 * 24 * 60 # 30 days
151
158
  @base_controller_class = "::ActionController::Base"
152
159
  @return_to_app_url = nil
160
+
161
+ @streams_enabled = true
162
+ @streams_queue_prefix = "pgbus_stream"
163
+ @streams_signed_name_secret = nil
164
+ @streams_default_retention = 5 * 60 # 5 minutes
165
+ @streams_retention = {}
166
+ @streams_heartbeat_interval = 15
167
+ @streams_max_connections = 2_000
168
+ @streams_idle_timeout = 3_600 # 1 hour
169
+ # 250ms — this value plays two roles: (1) the TCP keepalive
170
+ # interval for the streamer's PG LISTEN connection, and (2) the
171
+ # upper bound on how long Dispatcher#handle_connect waits for
172
+ # the Listener to acknowledge a synchronous ensure_listening
173
+ # call. 5s was unbounded enough to drop messages on a
174
+ # realistic subscribe burst; 250ms keeps the connect-path race
175
+ # window tight while still leaving headroom over a typical
176
+ # PG keepalive interval.
177
+ @streams_listen_health_check_ms = 250
178
+ @streams_write_deadline_ms = 5_000
179
+ @streams_falcon_streaming_body = false
180
+ # Opt-in: when true, the Dispatcher writes one row to
181
+ # pgbus_stream_stats per broadcast/connect/disconnect. Default
182
+ # off because stream event volume can be much higher than job
183
+ # volume and the Insights surface is only useful if operators
184
+ # actually look at it. Separate from #stats_enabled (which
185
+ # gates pgbus_job_stats recording) on purpose — operators
186
+ # usually want job stats on and stream stats off, or vice versa.
187
+ @streams_stats_enabled = false
153
188
  end
154
189
 
155
190
  def queue_name(name)
@@ -208,9 +243,39 @@ module Pgbus
208
243
  raise ArgumentError, "insights_default_minutes must be a positive integer"
209
244
  end
210
245
 
246
+ validate_streams!
247
+
211
248
  self
212
249
  end
213
250
 
251
# Validates the stream-related settings, raising ArgumentError with a
# descriptive message on the first violation. Checks run in a fixed
# order (retention, connections, heartbeat, idle timeout, listen
# health-check, write deadline, per-stream retention map) and the
# method returns nil when everything passes.
def validate_streams!
  checks = [
    ["streams_default_retention must be a non-negative number",
     streams_default_retention.is_a?(Numeric) && streams_default_retention >= 0],
    ["streams_max_connections must be a positive integer",
     streams_max_connections.is_a?(Integer) && streams_max_connections.positive?],
    ["streams_heartbeat_interval must be a positive number",
     streams_heartbeat_interval.is_a?(Numeric) && streams_heartbeat_interval.positive?],
    ["streams_idle_timeout must be a positive number",
     streams_idle_timeout.is_a?(Numeric) && streams_idle_timeout.positive?],
    ["streams_listen_health_check_ms must be a positive integer",
     streams_listen_health_check_ms.is_a?(Integer) && streams_listen_health_check_ms.positive?],
    ["streams_write_deadline_ms must be a positive integer",
     streams_write_deadline_ms.is_a?(Integer) && streams_write_deadline_ms.positive?],
    ["streams_retention must be a Hash",
     streams_retention.is_a?(Hash)]
  ]

  checks.each do |message, valid|
    raise ArgumentError, message unless valid
  end

  nil
end
+
214
279
  # Set the worker capsule list. Accepts:
215
280
  #
216
281
  # String — parsed via Pgbus::Configuration::CapsuleDSL into capsules
data/lib/pgbus/engine.rb CHANGED
@@ -46,6 +46,7 @@ module Pgbus
46
46
 
47
47
  rake_tasks do
48
48
  load File.expand_path("../tasks/pgbus_pgmq.rake", __dir__)
49
+ load File.expand_path("../tasks/pgbus_streams.rake", __dir__)
49
50
  end
50
51
 
51
52
  initializer "pgbus.i18n" do
@@ -56,5 +57,35 @@ module Pgbus
56
57
  require "pgbus/web/authentication"
57
58
  require "pgbus/web/data_source"
58
59
  end
60
+
61
+ # Register the watermark cache middleware in the app's Rack stack
62
+ # (`middleware.use` appends) so the thread-local cache is cleared
63
+ # on every request. Without this, repeated page renders served by
64
+ # the same Puma thread would see stale current_msg_id values.
65
+ initializer "pgbus.streams.middleware" do |app|
66
+ app.middleware.use Pgbus::Streams::WatermarkCacheMiddleware if Pgbus.configuration.streams_enabled
67
+ end
68
+
69
+ # Install the Turbo::StreamsChannel patch after turbo-rails has been
70
+ # loaded. The patch redirects broadcast_stream_to through Pgbus.stream
71
+ # instead of ActionCable. When turbo-rails is not loaded, this is a
72
+ # no-op and pgbus_stream_from still works via the explicit
73
+ # Pgbus.stream(...).broadcast(...) API.
74
+ initializer "pgbus.streams.turbo_broadcastable", after: :load_config_initializers do
75
+ ActiveSupport.on_load(:after_initialize) do
76
+ if Pgbus.configuration.streams_enabled
77
+ # Touch the constant first so Zeitwerk autoloads
78
+ # lib/pgbus/streams/turbo_broadcastable.rb. The file defines
79
+ # `Pgbus::Streams::TurboBroadcastable` (the autoloaded const)
80
+ # AND `Pgbus::Streams.install_turbo_broadcastable_patch!`
81
+ # (a side-effect class method on the parent module). Without
82
+ # the constant reference, Zeitwerk doesn't load the file and
83
+ # the method call below raises NoMethodError. Assigning to
84
+ # `_` keeps RuboCop's Lint/Void from deleting the line.
85
+ _autoload_trigger = Pgbus::Streams::TurboBroadcastable
86
+ Pgbus::Streams.install_turbo_broadcastable_patch!
87
+ end
88
+ end
89
+ end
59
90
  end
60
91
  end
@@ -151,15 +151,23 @@ module Pgbus
151
151
  end
152
152
 
153
153
  # Returns [constant_settings, varying_settings].
154
- # constant_settings: { "key" => value } (same value across all envs)
154
+ # constant_settings: { "key" => value } (same value in EVERY env)
155
155
  # varying_settings: { "key" => { env => value, ... } }
156
+ #
157
+ # A setting is only "constant" when it is present in every env
158
+ # and all envs agree on the value. If any env is missing the
159
+ # setting entirely (e.g. `polling_interval: 0.01` set only under
160
+ # `test:`), emitting it as an unconditional line would silently
161
+ # apply the value to envs that never asked for it — see #93.
156
162
  def partition_by_variance(all_settings)
157
163
  constant = {}
158
164
  varying = {}
159
165
  all_settings.each do |key, env_values|
160
166
  present_values = env_values.reject { |_, v| v == :__missing__ }
161
167
  unique_values = present_values.values.uniq
162
- if unique_values.size <= 1
168
+ all_envs_present = present_values.size == env_values.size
169
+
170
+ if all_envs_present && unique_values.size <= 1
163
171
  constant[key] = unique_values.first
164
172
  else
165
173
  varying[key] = present_values
@@ -182,6 +190,18 @@ module Pgbus
182
190
 
183
191
  def render_varying_setting(key, env_values)
184
192
  envs = env_values.keys
193
+
194
+ # Single-env coverage: the setting exists in exactly one env.
195
+ # Emit an `if Rails.env.X?` modifier rather than a case block
196
+ # so other envs fall back to the gem default. This is the
197
+ # fix for #93 — without it, a `test:`-only `polling_interval`
198
+ # would leak into dev and prod as an unconditional assignment.
199
+ if envs.size == 1
200
+ env = envs.first
201
+ value = env_values[env]
202
+ return ["c.#{key} = #{render_value(key, value)} if Rails.env.#{env}?"]
203
+ end
204
+
185
205
  if envs.size == 2 && envs.include?("development")
186
206
  # Special case: "everything except dev" — common pattern
187
207
  non_dev_value = env_values.except("development").values.first