pgbus 0.6.0 → 0.6.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +6 -1
- data/app/models/pgbus/stream_stat.rb +10 -5
- data/lib/generators/pgbus/add_job_stats_queue_index_generator.rb +53 -0
- data/lib/generators/pgbus/templates/add_job_stats_latency.rb.erb +4 -1
- data/lib/generators/pgbus/templates/add_job_stats_queue_index.rb.erb +11 -0
- data/lib/generators/pgbus/update_generator.rb +176 -23
- data/lib/pgbus/generators/config_converter.rb +22 -2
- data/lib/pgbus/generators/database_target_detector.rb +94 -0
- data/lib/pgbus/generators/migration_detector.rb +217 -0
- data/lib/pgbus/version.rb +1 -1
- data/lib/pgbus/web/data_source.rb +59 -10
- metadata +5 -1
checksums.yaml
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
---
|
|
2
2
|
SHA256:
|
|
3
|
-
metadata.gz:
|
|
4
|
-
data.tar.gz:
|
|
3
|
+
metadata.gz: f9e7369bd90c71a1c25bc9fdafe8905d8cdca99e5ae1deffb9c7b304aecc881d
|
|
4
|
+
data.tar.gz: 44d3fb5bf94742541ba5f9474c06cc602b2a7490decb7daee846c4b8f5ce1bbe
|
|
5
5
|
SHA512:
|
|
6
|
-
metadata.gz:
|
|
7
|
-
data.tar.gz:
|
|
6
|
+
metadata.gz: be5efa4a81ec3244475bdbbda0dd90aad02076f422ffc0f6def444e5a0553a875a414a3f98049dff7a3471700515168ac7cab1e4394d379d8a316cea4d1981e4
|
|
7
|
+
data.tar.gz: 515bf628c80f44252579432eb34d27c3f14fb830a7be0e479ca98b5d5651823c5dfb044276d44e9624e122accfab1faf93479827fcf92a5cacc75ef300a534f7
|
data/README.md
CHANGED
|
@@ -113,7 +113,12 @@ end
|
|
|
113
113
|
|
|
114
114
|
The capsule string DSL is the shortest form for the common case. Use `c.capsule` when you need named capsules with advanced options like `single_active_consumer` or `consumer_priority`. See [Routing and ordering](#routing-and-ordering) for the full set.
|
|
115
115
|
|
|
116
|
-
> **
|
|
116
|
+
> **Upgrading from an older pgbus?** Run `rails generate pgbus:update`. It does two things in one pass:
|
|
117
|
+
>
|
|
118
|
+
> - Converts any legacy `config/pgbus.yml` to a Ruby initializer at `config/initializers/pgbus.rb` (skipped if the initializer already exists).
|
|
119
|
+
> - Inspects your live database and adds any missing pgbus migrations to `db/migrate` (or `db/pgbus_migrate` if you use `connects_to`). The generator detects your separate-database config automatically from `Pgbus.configuration.connects_to` or by scanning the initializer / `config/application.rb`, so you don't have to re-specify `--database=pgbus` every time.
|
|
120
|
+
>
|
|
121
|
+
> Useful flags: `--dry-run` (print the plan without creating files), `--skip-config`, `--skip-migrations`, `--quiet`. Running it on a database with no pgbus tables at all will redirect you to `pgbus:install` instead of stacking individual add_* migrations.
|
|
117
122
|
|
|
118
123
|
### 2. Use as ActiveJob backend
|
|
119
124
|
|
|
@@ -14,9 +14,14 @@ module Pgbus
|
|
|
14
14
|
EVENT_TYPES = %w[broadcast connect disconnect].freeze
|
|
15
15
|
|
|
16
16
|
scope :since, ->(time) { where("created_at >= ?", time) }
|
|
17
|
-
|
|
18
|
-
|
|
19
|
-
|
|
17
|
+
# NOTE: scope names intentionally avoid `broadcasts` / `connects` /
|
|
18
|
+
# `disconnects`. `turbo-rails` auto-includes `Turbo::Broadcastable`
|
|
19
|
+
# into every AR model, which defines a class method `broadcasts`.
|
|
20
|
+
# AR's dangerous_class_method? guard then rejects a colliding
|
|
21
|
+
# scope name at load time, crashing eager_load. See issue #92.
|
|
22
|
+
scope :broadcast_events, -> { where(event_type: "broadcast") }
|
|
23
|
+
scope :connect_events, -> { where(event_type: "connect") }
|
|
24
|
+
scope :disconnect_events, -> { where(event_type: "disconnect") }
|
|
20
25
|
|
|
21
26
|
# Records a stream event. Called from the Dispatcher when the
|
|
22
27
|
# `streams_stats_enabled` flag is set. Errors are swallowed so a
|
|
@@ -79,7 +84,7 @@ module Pgbus
|
|
|
79
84
|
|
|
80
85
|
# Top N streams by broadcast count in the window, with avg fanout.
|
|
81
86
|
def self.top_streams(limit: 10, minutes: 60)
|
|
82
|
-
|
|
87
|
+
broadcast_events
|
|
83
88
|
.since(minutes.minutes.ago)
|
|
84
89
|
.group(:stream_name)
|
|
85
90
|
.order(Arel.sql("COUNT(*) DESC"))
|
|
@@ -102,7 +107,7 @@ module Pgbus
|
|
|
102
107
|
|
|
103
108
|
# Throughput: broadcast events per minute bucketed by minute.
|
|
104
109
|
def self.throughput(minutes: 60)
|
|
105
|
-
|
|
110
|
+
broadcast_events
|
|
106
111
|
.since(minutes.minutes.ago)
|
|
107
112
|
.group("date_trunc('minute', created_at)")
|
|
108
113
|
.order(Arel.sql("date_trunc('minute', created_at)"))
|
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
require "rails/generators"
|
|
4
|
+
require "rails/generators/active_record"
|
|
5
|
+
|
|
6
|
+
module Pgbus
|
|
7
|
+
module Generators
|
|
8
|
+
class AddJobStatsQueueIndexGenerator < Rails::Generators::Base
|
|
9
|
+
include ActiveRecord::Generators::Migration
|
|
10
|
+
|
|
11
|
+
source_root File.expand_path("templates", __dir__)
|
|
12
|
+
|
|
13
|
+
desc "Add composite index on pgbus_job_stats (queue_name, created_at) for Insights latency-by-queue aggregation"
|
|
14
|
+
|
|
15
|
+
class_option :database,
|
|
16
|
+
type: :string,
|
|
17
|
+
default: nil,
|
|
18
|
+
desc: "Use a separate database for pgbus tables (e.g. --database=pgbus)"
|
|
19
|
+
|
|
20
|
+
def create_migration_file
|
|
21
|
+
if separate_database?
|
|
22
|
+
migration_template "add_job_stats_queue_index.rb.erb",
|
|
23
|
+
"db/pgbus_migrate/add_pgbus_job_stats_queue_index.rb"
|
|
24
|
+
else
|
|
25
|
+
migration_template "add_job_stats_queue_index.rb.erb",
|
|
26
|
+
"db/migrate/add_pgbus_job_stats_queue_index.rb"
|
|
27
|
+
end
|
|
28
|
+
end
|
|
29
|
+
|
|
30
|
+
def display_post_install
|
|
31
|
+
say ""
|
|
32
|
+
say "Pgbus job stats queue index added!", :green
|
|
33
|
+
say ""
|
|
34
|
+
say "Next steps:"
|
|
35
|
+
say " 1. Run: rails db:migrate#{":#{options[:database]}" if separate_database?}"
|
|
36
|
+
say " 2. The Insights 'latency by queue' aggregation will now use the index"
|
|
37
|
+
say " instead of sequentially scanning pgbus_job_stats. Install this on"
|
|
38
|
+
say " heavy-traffic deployments with a large job stats retention window."
|
|
39
|
+
say ""
|
|
40
|
+
end
|
|
41
|
+
|
|
42
|
+
private
|
|
43
|
+
|
|
44
|
+
def migration_version
|
|
45
|
+
"[#{ActiveRecord::Migration.current_version}]"
|
|
46
|
+
end
|
|
47
|
+
|
|
48
|
+
def separate_database?
|
|
49
|
+
options[:database].present?
|
|
50
|
+
end
|
|
51
|
+
end
|
|
52
|
+
end
|
|
53
|
+
end
|
|
@@ -3,7 +3,10 @@ class AddPgbusJobStatsLatency < ActiveRecord::Migration<%= migration_version %>
|
|
|
3
3
|
add_column :pgbus_job_stats, :enqueue_latency_ms, :bigint
|
|
4
4
|
add_column :pgbus_job_stats, :retry_count, :integer, default: 0
|
|
5
5
|
|
|
6
|
+
# idempotent: the standalone add_job_stats_queue_index generator
|
|
7
|
+
# creates the same index. Either order is safe.
|
|
6
8
|
add_index :pgbus_job_stats, [:queue_name, :created_at],
|
|
7
|
-
name: "idx_pgbus_job_stats_queue_time"
|
|
9
|
+
name: "idx_pgbus_job_stats_queue_time",
|
|
10
|
+
if_not_exists: true
|
|
8
11
|
end
|
|
9
12
|
end
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
class AddPgbusJobStatsQueueIndex < ActiveRecord::Migration<%= migration_version %>
|
|
2
|
+
def change
|
|
3
|
+
# Supports InsightsController#latency_by_queue and any other
|
|
4
|
+
# per-queue aggregation over pgbus_job_stats. Without this, a
|
|
5
|
+
# 60-day window over 1M jobs/day seq-scans the whole table for
|
|
6
|
+
# every Insights page load with the latency tab open.
|
|
7
|
+
add_index :pgbus_job_stats, %i[queue_name created_at],
|
|
8
|
+
name: "idx_pgbus_job_stats_queue_time",
|
|
9
|
+
if_not_exists: true
|
|
10
|
+
end
|
|
11
|
+
end
|
|
@@ -2,58 +2,171 @@
|
|
|
2
2
|
|
|
3
3
|
require "rails/generators"
|
|
4
4
|
require "pgbus/generators/config_converter"
|
|
5
|
+
require "pgbus/generators/migration_detector"
|
|
6
|
+
require "pgbus/generators/database_target_detector"
|
|
5
7
|
|
|
6
8
|
module Pgbus
|
|
7
9
|
module Generators
|
|
8
|
-
#
|
|
9
|
-
# config/initializers/pgbus.rb using the modern DSL.
|
|
10
|
+
# Upgrade command with two independent jobs:
|
|
10
11
|
#
|
|
11
|
-
#
|
|
12
|
-
#
|
|
12
|
+
# 1. Config conversion: if config/pgbus.yml exists, convert it to
|
|
13
|
+
# config/initializers/pgbus.rb using the modern Ruby DSL. Skip
|
|
14
|
+
# silently if the initializer already exists or the YAML is
|
|
15
|
+
# absent — safe to re-run.
|
|
16
|
+
#
|
|
17
|
+
# 2. Migration detection: inspect the live database and add any
|
|
18
|
+
# missing pgbus migrations to db/migrate (or db/pgbus_migrate
|
|
19
|
+
# if a separate database is configured). Invokes each matching
|
|
20
|
+
# sub-generator in-process via Thor's invoke, so this mirrors
|
|
21
|
+
# what the user would get running each generator by hand.
|
|
13
22
|
#
|
|
14
23
|
# Usage:
|
|
15
24
|
#
|
|
16
25
|
# bin/rails generate pgbus:update
|
|
17
|
-
# bin/rails generate pgbus:update --
|
|
18
|
-
# bin/rails generate pgbus:update --
|
|
26
|
+
# bin/rails generate pgbus:update --dry-run
|
|
27
|
+
# bin/rails generate pgbus:update --skip-config
|
|
28
|
+
# bin/rails generate pgbus:update --skip-migrations
|
|
29
|
+
# bin/rails generate pgbus:update --database=pgbus
|
|
30
|
+
# bin/rails generate pgbus:update --quiet
|
|
19
31
|
class UpdateGenerator < Rails::Generators::Base
|
|
20
|
-
desc "
|
|
32
|
+
desc "Upgrade pgbus: convert YAML config + add any missing migrations"
|
|
21
33
|
|
|
22
34
|
class_option :source,
|
|
23
35
|
type: :string,
|
|
24
36
|
default: "config/pgbus.yml",
|
|
25
|
-
desc: "Path to
|
|
37
|
+
desc: "Path to an existing YAML config to convert (default: config/pgbus.yml)"
|
|
26
38
|
|
|
27
39
|
class_option :destination,
|
|
28
40
|
type: :string,
|
|
29
41
|
default: "config/initializers/pgbus.rb",
|
|
30
42
|
desc: "Path to the generated initializer (default: config/initializers/pgbus.rb)"
|
|
31
43
|
|
|
32
|
-
|
|
44
|
+
class_option :skip_config,
|
|
45
|
+
type: :boolean,
|
|
46
|
+
default: false,
|
|
47
|
+
desc: "Skip the YAML → Ruby initializer conversion step"
|
|
48
|
+
|
|
49
|
+
class_option :skip_migrations,
|
|
50
|
+
type: :boolean,
|
|
51
|
+
default: false,
|
|
52
|
+
desc: "Skip the migration detection step"
|
|
53
|
+
|
|
54
|
+
class_option :database,
|
|
55
|
+
type: :string,
|
|
56
|
+
default: nil,
|
|
57
|
+
desc: "Use a separate database for pgbus tables (default: auto-detect " \
|
|
58
|
+
"from Pgbus.configuration.connects_to or config/initializers/pgbus.rb)"
|
|
59
|
+
|
|
60
|
+
class_option :dry_run,
|
|
61
|
+
type: :boolean,
|
|
62
|
+
default: false,
|
|
63
|
+
desc: "Print what would be done without creating any files"
|
|
64
|
+
|
|
65
|
+
class_option :quiet,
|
|
66
|
+
type: :boolean,
|
|
67
|
+
default: false,
|
|
68
|
+
desc: "Suppress verbose per-step output"
|
|
69
|
+
|
|
70
|
+
def convert_yaml_if_present
|
|
71
|
+
return if options[:skip_config]
|
|
72
|
+
|
|
33
73
|
source_path = File.expand_path(options[:source], destination_root)
|
|
34
74
|
destination_path = File.expand_path(options[:destination], destination_root)
|
|
35
75
|
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
76
|
+
unless File.exist?(source_path)
|
|
77
|
+
log "YAML config not found at #{options[:source]}; skipping config conversion."
|
|
78
|
+
return
|
|
79
|
+
end
|
|
80
|
+
|
|
81
|
+
if File.exist?(destination_path)
|
|
82
|
+
log "Initializer already exists at #{options[:destination]}; skipping config conversion."
|
|
83
|
+
return
|
|
84
|
+
end
|
|
41
85
|
|
|
42
86
|
ruby_source = load_and_convert(source_path)
|
|
43
|
-
|
|
87
|
+
if options[:dry_run]
|
|
88
|
+
log_change "[dry-run] would create #{options[:destination]}"
|
|
89
|
+
else
|
|
90
|
+
create_file destination_path, ruby_source
|
|
91
|
+
end
|
|
92
|
+
end
|
|
93
|
+
|
|
94
|
+
def detect_and_install_missing_migrations
|
|
95
|
+
return if options[:skip_migrations]
|
|
96
|
+
|
|
97
|
+
unless active_record_available?
|
|
98
|
+
log "ActiveRecord not loaded — skipping migration detection. Run this generator from a Rails app."
|
|
99
|
+
return
|
|
100
|
+
end
|
|
101
|
+
|
|
102
|
+
connection = resolve_connection
|
|
103
|
+
unless connection
|
|
104
|
+
log "No ActiveRecord connection available — skipping migration detection."
|
|
105
|
+
return
|
|
106
|
+
end
|
|
107
|
+
|
|
108
|
+
detector = MigrationDetector.new(connection)
|
|
109
|
+
missing = detector.missing_migrations
|
|
110
|
+
|
|
111
|
+
if missing.empty?
|
|
112
|
+
log "Database schema is up to date — no migrations needed."
|
|
113
|
+
return
|
|
114
|
+
end
|
|
115
|
+
|
|
116
|
+
if missing == [MigrationDetector::FRESH_INSTALL]
|
|
117
|
+
say ""
|
|
118
|
+
say "Database looks empty of pgbus tables — this is a fresh install.", :yellow
|
|
119
|
+
say "Run `rails generate pgbus:install` instead of `pgbus:update`.", :yellow
|
|
120
|
+
say ""
|
|
121
|
+
return
|
|
122
|
+
end
|
|
123
|
+
|
|
124
|
+
database_name = options[:database] || detected_database_name
|
|
125
|
+
log "Auto-detected separate database: #{database_name}" if options[:database].nil? && database_name
|
|
126
|
+
|
|
127
|
+
log "Found #{missing.size} missing migration(s):"
|
|
128
|
+
missing.each do |key|
|
|
129
|
+
description = MigrationDetector::DESCRIPTIONS[key] || key.to_s
|
|
130
|
+
log " - #{key}: #{description}"
|
|
131
|
+
end
|
|
132
|
+
|
|
133
|
+
# Two loops on purpose: print the full plan first so operators
|
|
134
|
+
# see what's coming, then execute. Combining would interleave
|
|
135
|
+
# " - add_presence: foo" with "Invoking pgbus:add_presence..."
|
|
136
|
+
# which hides the shape of the upgrade from the reader.
|
|
137
|
+
missing.each do |key| # rubocop:disable Style/CombinableLoops
|
|
138
|
+
generator = MigrationDetector::GENERATOR_MAP[key]
|
|
139
|
+
unless generator
|
|
140
|
+
say " ! no generator mapped for #{key}, skipping", :red
|
|
141
|
+
next
|
|
142
|
+
end
|
|
143
|
+
|
|
144
|
+
if options[:dry_run]
|
|
145
|
+
log_change "[dry-run] would invoke #{generator}#{" --database=#{database_name}" if database_name}"
|
|
146
|
+
next
|
|
147
|
+
end
|
|
148
|
+
|
|
149
|
+
invoke_args = []
|
|
150
|
+
invoke_args << "--database=#{database_name}" if database_name
|
|
151
|
+
log "Invoking #{generator}#{" --database=#{database_name}" if database_name}..."
|
|
152
|
+
invoke generator, invoke_args
|
|
153
|
+
end
|
|
44
154
|
end
|
|
45
155
|
|
|
46
156
|
def display_post_install
|
|
157
|
+
return if options[:quiet]
|
|
158
|
+
|
|
47
159
|
say ""
|
|
48
|
-
say "Pgbus
|
|
49
|
-
say ""
|
|
50
|
-
say "Next steps:"
|
|
51
|
-
say " 1. Review the generated initializer for correctness"
|
|
52
|
-
say " 2. Boot your app and verify everything still works"
|
|
53
|
-
say " 3. Delete #{options[:source]} when satisfied (Pgbus will stop reading it)"
|
|
160
|
+
say "Pgbus update complete.", :green
|
|
54
161
|
say ""
|
|
55
|
-
|
|
56
|
-
|
|
162
|
+
if options[:dry_run]
|
|
163
|
+
say "Dry-run: no files were created.", :yellow
|
|
164
|
+
else
|
|
165
|
+
say "Next steps:"
|
|
166
|
+
say " 1. Review the generated migration files in db/migrate (or db/pgbus_migrate)"
|
|
167
|
+
say " 2. Run: rails db:migrate#{":#{effective_database_name}" if effective_database_name}"
|
|
168
|
+
say " 3. Restart pgbus: bin/pgbus start"
|
|
169
|
+
end
|
|
57
170
|
say ""
|
|
58
171
|
end
|
|
59
172
|
|
|
@@ -70,6 +183,46 @@ module Pgbus
|
|
|
70
183
|
rescue ConfigConverter::Error, Psych::Exception, Errno::ENOENT, Errno::EACCES => e
|
|
71
184
|
raise Thor::Error, "Failed to convert #{options[:source]}: #{e.message}"
|
|
72
185
|
end
|
|
186
|
+
|
|
187
|
+
def active_record_available?
|
|
188
|
+
defined?(::ActiveRecord::Base) && ::ActiveRecord::Base.respond_to?(:connection)
|
|
189
|
+
end
|
|
190
|
+
|
|
191
|
+
# Resolve the AR connection to inspect. If pgbus is configured to
|
|
192
|
+
# use a separate database (via connects_to), use BusRecord's
|
|
193
|
+
# connection so the detector probes the right schema.
|
|
194
|
+
def resolve_connection
|
|
195
|
+
if defined?(Pgbus) && Pgbus.respond_to?(:configuration) && Pgbus.configuration.connects_to
|
|
196
|
+
Pgbus::BusRecord.connection
|
|
197
|
+
else
|
|
198
|
+
::ActiveRecord::Base.connection
|
|
199
|
+
end
|
|
200
|
+
rescue StandardError => e
|
|
201
|
+
say " ! could not resolve AR connection: #{e.class}: #{e.message}", :red
|
|
202
|
+
nil
|
|
203
|
+
end
|
|
204
|
+
|
|
205
|
+
def detected_database_name
|
|
206
|
+
@detected_database_name ||= DatabaseTargetDetector.new(
|
|
207
|
+
destination_root: destination_root
|
|
208
|
+
).detect
|
|
209
|
+
end
|
|
210
|
+
|
|
211
|
+
def effective_database_name
|
|
212
|
+
options[:database] || detected_database_name
|
|
213
|
+
end
|
|
214
|
+
|
|
215
|
+
def log(message)
|
|
216
|
+
return if options[:quiet]
|
|
217
|
+
|
|
218
|
+
say message
|
|
219
|
+
end
|
|
220
|
+
|
|
221
|
+
def log_change(message)
|
|
222
|
+
return if options[:quiet]
|
|
223
|
+
|
|
224
|
+
say message, :yellow
|
|
225
|
+
end
|
|
73
226
|
end
|
|
74
227
|
end
|
|
75
228
|
end
|
|
@@ -151,15 +151,23 @@ module Pgbus
|
|
|
151
151
|
end
|
|
152
152
|
|
|
153
153
|
# Returns [constant_settings, varying_settings].
|
|
154
|
-
# constant_settings: { "key" => value } (same value
|
|
154
|
+
# constant_settings: { "key" => value } (same value in EVERY env)
|
|
155
155
|
# varying_settings: { "key" => { env => value, ... } }
|
|
156
|
+
#
|
|
157
|
+
# A setting is only "constant" when it is present in every env
|
|
158
|
+
# and all envs agree on the value. If any env is missing the
|
|
159
|
+
# setting entirely (e.g. `polling_interval: 0.01` set only under
|
|
160
|
+
# `test:`), emitting it as an unconditional line would silently
|
|
161
|
+
# apply the value to envs that never asked for it — see #93.
|
|
156
162
|
def partition_by_variance(all_settings)
|
|
157
163
|
constant = {}
|
|
158
164
|
varying = {}
|
|
159
165
|
all_settings.each do |key, env_values|
|
|
160
166
|
present_values = env_values.reject { |_, v| v == :__missing__ }
|
|
161
167
|
unique_values = present_values.values.uniq
|
|
162
|
-
|
|
168
|
+
all_envs_present = present_values.size == env_values.size
|
|
169
|
+
|
|
170
|
+
if all_envs_present && unique_values.size <= 1
|
|
163
171
|
constant[key] = unique_values.first
|
|
164
172
|
else
|
|
165
173
|
varying[key] = present_values
|
|
@@ -182,6 +190,18 @@ module Pgbus
|
|
|
182
190
|
|
|
183
191
|
def render_varying_setting(key, env_values)
|
|
184
192
|
envs = env_values.keys
|
|
193
|
+
|
|
194
|
+
# Single-env coverage: the setting exists in exactly one env.
|
|
195
|
+
# Emit an `if Rails.env.X?` modifier rather than a case block
|
|
196
|
+
# so other envs fall back to the gem default. This is the
|
|
197
|
+
# fix for #93 — without it, a `test:`-only `polling_interval`
|
|
198
|
+
# would leak into dev and prod as an unconditional assignment.
|
|
199
|
+
if envs.size == 1
|
|
200
|
+
env = envs.first
|
|
201
|
+
value = env_values[env]
|
|
202
|
+
return ["c.#{key} = #{render_value(key, value)} if Rails.env.#{env}?"]
|
|
203
|
+
end
|
|
204
|
+
|
|
185
205
|
if envs.size == 2 && envs.include?("development")
|
|
186
206
|
# Special case: "everything except dev" — common pattern
|
|
187
207
|
non_dev_value = env_values.except("development").values.first
|
|
@@ -0,0 +1,94 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
module Pgbus
|
|
4
|
+
module Generators
|
|
5
|
+
# Detects whether the host application has configured pgbus to use
|
|
6
|
+
# a separate database via connects_to, and returns the database name
|
|
7
|
+
# that --database flags should be set to for sub-generators.
|
|
8
|
+
#
|
|
9
|
+
# Detection sources, in priority order:
|
|
10
|
+
#
|
|
11
|
+
# 1. Pgbus.configuration.connects_to (runtime — authoritative if
|
|
12
|
+
# the initializer has already booted)
|
|
13
|
+
# 2. config/initializers/pgbus.rb (text scan)
|
|
14
|
+
# 3. config/application.rb (text scan — fallback for apps that
|
|
15
|
+
# call connects_to in application config instead of the pgbus
|
|
16
|
+
# initializer)
|
|
17
|
+
#
|
|
18
|
+
# Returns a String (the database name, e.g. "pgbus") or nil if no
|
|
19
|
+
# separate database is configured.
|
|
20
|
+
class DatabaseTargetDetector
|
|
21
|
+
# Matches:
|
|
22
|
+
# c.connects_to = { database: { writing: :pgbus } }
|
|
23
|
+
# c.connects_to(database: { writing: :queue_db })
|
|
24
|
+
# Pgbus.configuration.connects_to = { database: { writing: :pgbus } }
|
|
25
|
+
#
|
|
26
|
+
# Rejects:
|
|
27
|
+
# c.connects_to = { role: :writing } (different API shape)
|
|
28
|
+
# connects_to :something (no database: key)
|
|
29
|
+
#
|
|
30
|
+
# The [^;\n]*? and [^}]*? laziness keeps the scan within a
|
|
31
|
+
# reasonable window so a stray "writing:" later in the file
|
|
32
|
+
# can't cross-contaminate.
|
|
33
|
+
CONNECTS_TO_PATTERN = /connects_to\b[^;\n]*?database\s*:\s*\{[^}]*?writing\s*:\s*:?(?<name>[a-zA-Z_][a-zA-Z0-9_]*)/m
|
|
34
|
+
|
|
35
|
+
def initialize(destination_root:)
|
|
36
|
+
@destination_root = destination_root
|
|
37
|
+
end
|
|
38
|
+
|
|
39
|
+
# Returns the database name string if a separate DB is configured,
|
|
40
|
+
# nil otherwise. Checks runtime config first, then falls back to
|
|
41
|
+
# static file scanning.
|
|
42
|
+
def detect
|
|
43
|
+
runtime_database_name || scan_initializer || scan_application_config
|
|
44
|
+
end
|
|
45
|
+
|
|
46
|
+
private
|
|
47
|
+
|
|
48
|
+
attr_reader :destination_root
|
|
49
|
+
|
|
50
|
+
# Runtime path: Pgbus.configuration.connects_to. Only works if the
|
|
51
|
+
# host app has loaded pgbus AND booted the initializer. The update
|
|
52
|
+
# generator runs after Rails app boot so this is usually available.
|
|
53
|
+
def runtime_database_name
|
|
54
|
+
return nil unless defined?(Pgbus) && Pgbus.respond_to?(:configuration)
|
|
55
|
+
|
|
56
|
+
extract_database_name(Pgbus.configuration.connects_to)
|
|
57
|
+
rescue StandardError
|
|
58
|
+
nil
|
|
59
|
+
end
|
|
60
|
+
|
|
61
|
+
def scan_initializer
|
|
62
|
+
scan_file(File.join(destination_root, "config", "initializers", "pgbus.rb"))
|
|
63
|
+
end
|
|
64
|
+
|
|
65
|
+
def scan_application_config
|
|
66
|
+
scan_file(File.join(destination_root, "config", "application.rb"))
|
|
67
|
+
end
|
|
68
|
+
|
|
69
|
+
def scan_file(path)
|
|
70
|
+
return nil unless File.exist?(path)
|
|
71
|
+
|
|
72
|
+
content = File.read(path)
|
|
73
|
+
match = content.match(CONNECTS_TO_PATTERN)
|
|
74
|
+
return nil unless match
|
|
75
|
+
|
|
76
|
+
match[:name]
|
|
77
|
+
rescue StandardError
|
|
78
|
+
nil
|
|
79
|
+
end
|
|
80
|
+
|
|
81
|
+
# Parses `{ database: { writing: :name } }` or the String variant
|
|
82
|
+
# into the database name. Returns nil for anything we don't
|
|
83
|
+
# recognize.
|
|
84
|
+
def extract_database_name(connects_to)
|
|
85
|
+
return nil unless connects_to.is_a?(Hash)
|
|
86
|
+
|
|
87
|
+
db = connects_to[:database] || connects_to["database"]
|
|
88
|
+
return nil unless db.is_a?(Hash)
|
|
89
|
+
|
|
90
|
+
(db[:writing] || db["writing"])&.to_s
|
|
91
|
+
end
|
|
92
|
+
end
|
|
93
|
+
end
|
|
94
|
+
end
|
|
@@ -0,0 +1,217 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
module Pgbus
|
|
4
|
+
module Generators
|
|
5
|
+
# Inspects a live ActiveRecord connection and determines which of
|
|
6
|
+
# pgbus's migration generators need to run to bring the schema up
|
|
7
|
+
# to date.
|
|
8
|
+
#
|
|
9
|
+
# Usage:
|
|
10
|
+
#
|
|
11
|
+
# detector = Pgbus::Generators::MigrationDetector.new(connection)
|
|
12
|
+
# detector.missing_migrations
|
|
13
|
+
# # => [:add_uniqueness_keys, :add_job_stats_queue_index, ...]
|
|
14
|
+
#
|
|
15
|
+
# The returned symbols correspond to generator names that the
|
|
16
|
+
# pgbus:update generator invokes via Thor composition. Each symbol
|
|
17
|
+
# maps to exactly one generator (see GENERATOR_MAP).
|
|
18
|
+
#
|
|
19
|
+
# Detection rules:
|
|
20
|
+
#
|
|
21
|
+
# 1. Fresh install (no core tables) → returns [:fresh_install] as a
|
|
22
|
+
# sentinel. The caller should tell the user to run pgbus:install
|
|
23
|
+
# instead of stacking 8 migrations.
|
|
24
|
+
#
|
|
25
|
+
# 2. Core tables missing → queued unconditionally. These are features
|
|
26
|
+
# pgbus assumes are present.
|
|
27
|
+
#
|
|
28
|
+
# 3. Opt-in feature tables missing → queued unconditionally. The user
|
|
29
|
+
# asked for update-to-latest, so we add the migration files but
|
|
30
|
+
# don't enable the feature in config. They'll opt in separately.
|
|
31
|
+
#
|
|
32
|
+
# 4. Columns missing on existing tables → queued. These are in-place
|
|
33
|
+
# schema upgrades (e.g. add_job_stats_latency adds
|
|
34
|
+
# enqueue_latency_ms + retry_count).
|
|
35
|
+
#
|
|
36
|
+
# 5. Indexes missing on existing tables → queued. Additive, safe.
|
|
37
|
+
#
|
|
38
|
+
# 6. Modern replacements for legacy tables (e.g. pgbus_job_locks →
|
|
39
|
+
# pgbus_uniqueness_keys) → queue the migration path only if the
|
|
40
|
+
# legacy table still exists. Otherwise queue the fresh install.
|
|
41
|
+
class MigrationDetector
|
|
42
|
+
# Sentinel returned when the database looks empty of pgbus tables.
|
|
43
|
+
# The caller (pgbus:update generator) should redirect the user to
|
|
44
|
+
# pgbus:install rather than trying to stack the full schema as
|
|
45
|
+
# individual add_* migrations.
|
|
46
|
+
FRESH_INSTALL = :fresh_install
|
|
47
|
+
|
|
48
|
+
# The set of tables that the base pgbus:install migration creates.
|
|
49
|
+
# If NONE of these exist, we treat the DB as a fresh install.
|
|
50
|
+
CORE_INSTALL_TABLES = %w[
|
|
51
|
+
pgbus_processed_events
|
|
52
|
+
pgbus_processes
|
|
53
|
+
pgbus_failed_events
|
|
54
|
+
pgbus_semaphores
|
|
55
|
+
pgbus_blocked_executions
|
|
56
|
+
pgbus_batches
|
|
57
|
+
].freeze
|
|
58
|
+
|
|
59
|
+
# generator_key → Rails generator name. Passed to Thor's invoke.
|
|
60
|
+
#
|
|
61
|
+
# Note: uniqueness_keys uses the migrate_job_locks generator for
|
|
62
|
+
# both the fresh-install and upgrade-from-job_locks paths. The
|
|
63
|
+
# template is idempotent: `unless table_exists?(:pgbus_uniqueness_keys)`
|
|
64
|
+
# creates it, and `if table_exists?(:pgbus_job_locks)` drops the
|
|
65
|
+
# legacy table. One generator covers both cases.
|
|
66
|
+
GENERATOR_MAP = {
|
|
67
|
+
uniqueness_keys: "pgbus:migrate_job_locks",
|
|
68
|
+
add_job_stats: "pgbus:add_job_stats",
|
|
69
|
+
add_job_stats_latency: "pgbus:add_job_stats_latency",
|
|
70
|
+
add_job_stats_queue_index: "pgbus:add_job_stats_queue_index",
|
|
71
|
+
add_stream_stats: "pgbus:add_stream_stats",
|
|
72
|
+
add_presence: "pgbus:add_presence",
|
|
73
|
+
add_queue_states: "pgbus:add_queue_states",
|
|
74
|
+
add_outbox: "pgbus:add_outbox",
|
|
75
|
+
add_recurring: "pgbus:add_recurring",
|
|
76
|
+
add_failed_events_index: "pgbus:add_failed_events_index"
|
|
77
|
+
}.freeze
|
|
78
|
+
|
|
79
|
+
# Human-friendly description of each migration for the generator
|
|
80
|
+
# output. Keeps the update generator's run log readable.
|
|
81
|
+
DESCRIPTIONS = {
|
|
82
|
+
uniqueness_keys: "uniqueness keys table (job deduplication, also upgrades legacy job_locks if present)",
|
|
83
|
+
add_job_stats: "job stats table (Insights dashboard)",
|
|
84
|
+
add_job_stats_latency: "job stats latency columns (enqueue_latency_ms, retry_count)",
|
|
85
|
+
add_job_stats_queue_index: "job stats (queue_name, created_at) index",
|
|
86
|
+
add_stream_stats: "stream stats table (opt-in real-time Insights)",
|
|
87
|
+
add_presence: "presence members table (Turbo Streams presence)",
|
|
88
|
+
add_queue_states: "queue states table (pause/resume)",
|
|
89
|
+
add_outbox: "outbox entries table (transactional outbox)",
|
|
90
|
+
add_recurring: "recurring tasks + executions tables",
|
|
91
|
+
add_failed_events_index: "unique index on pgbus_failed_events (queue_name, msg_id)"
|
|
92
|
+
}.freeze
|
|
93
|
+
|
|
94
|
+
def initialize(connection)
|
|
95
|
+
@connection = connection
|
|
96
|
+
end
|
|
97
|
+
|
|
98
|
+
# Returns an Array of generator keys (symbols) in the order they
|
|
99
|
+
# should run. Dependencies are resolved implicitly via order: the
|
|
100
|
+
# base table creation for a feature always comes before the
|
|
101
|
+
# column/index add-ons.
|
|
102
|
+
def missing_migrations
|
|
103
|
+
return [FRESH_INSTALL] if fresh_install?
|
|
104
|
+
|
|
105
|
+
[
|
|
106
|
+
*uniqueness_key_migrations,
|
|
107
|
+
*job_stats_migrations,
|
|
108
|
+
*stream_stats_migrations,
|
|
109
|
+
*presence_migrations,
|
|
110
|
+
*queue_states_migrations,
|
|
111
|
+
*outbox_migrations,
|
|
112
|
+
*recurring_migrations,
|
|
113
|
+
*failed_events_index_migrations
|
|
114
|
+
]
|
|
115
|
+
end
|
|
116
|
+
|
|
117
|
+
private
|
|
118
|
+
|
|
119
|
+
attr_reader :connection
|
|
120
|
+
|
|
121
|
+
def fresh_install?
|
|
122
|
+
CORE_INSTALL_TABLES.none? { |t| table_exists?(t) }
|
|
123
|
+
end
|
|
124
|
+
|
|
125
|
+
# Legacy pgbus_job_locks → modern pgbus_uniqueness_keys.
|
|
126
|
+
# The migrate_job_locks template is idempotent: it creates
|
|
127
|
+
# uniqueness_keys if missing and drops job_locks if present.
|
|
128
|
+
# One symbol covers both cases; see GENERATOR_MAP.
|
|
129
|
+
def uniqueness_key_migrations
|
|
130
|
+
return [] if table_exists?("pgbus_uniqueness_keys")
|
|
131
|
+
|
|
132
|
+
[:uniqueness_keys]
|
|
133
|
+
end
|
|
134
|
+
|
|
135
|
+
def job_stats_migrations
|
|
136
|
+
migrations = []
|
|
137
|
+
|
|
138
|
+
unless table_exists?("pgbus_job_stats")
|
|
139
|
+
migrations << :add_job_stats
|
|
140
|
+
# The latency columns and queue index are add-ons to job_stats.
|
|
141
|
+
# If we're creating the base table now, the add_job_stats
|
|
142
|
+
# template doesn't include them — add the upgrade migrations
|
|
143
|
+
# so a fresh install lands on the current schema.
|
|
144
|
+
migrations << :add_job_stats_latency
|
|
145
|
+
migrations << :add_job_stats_queue_index
|
|
146
|
+
return migrations
|
|
147
|
+
end
|
|
148
|
+
|
|
149
|
+
# Base table exists — check each add-on independently.
|
|
150
|
+
cols = column_names("pgbus_job_stats")
|
|
151
|
+
migrations << :add_job_stats_latency unless cols.include?("enqueue_latency_ms") && cols.include?("retry_count")
|
|
152
|
+
|
|
153
|
+
migrations << :add_job_stats_queue_index unless index_exists?("pgbus_job_stats", "idx_pgbus_job_stats_queue_time")
|
|
154
|
+
|
|
155
|
+
migrations
|
|
156
|
+
end
|
|
157
|
+
|
|
158
|
+
def stream_stats_migrations
|
|
159
|
+
table_exists?("pgbus_stream_stats") ? [] : [:add_stream_stats]
|
|
160
|
+
end
|
|
161
|
+
|
|
162
|
+
def presence_migrations
|
|
163
|
+
table_exists?("pgbus_presence_members") ? [] : [:add_presence]
|
|
164
|
+
end
|
|
165
|
+
|
|
166
|
+
def queue_states_migrations
|
|
167
|
+
table_exists?("pgbus_queue_states") ? [] : [:add_queue_states]
|
|
168
|
+
end
|
|
169
|
+
|
|
170
|
+
def outbox_migrations
|
|
171
|
+
table_exists?("pgbus_outbox_entries") ? [] : [:add_outbox]
|
|
172
|
+
end
|
|
173
|
+
|
|
174
|
+
def recurring_migrations
|
|
175
|
+
# pgbus_recurring_tasks + pgbus_recurring_executions are the two
|
|
176
|
+
# tables the recurring generator creates. If BOTH exist, nothing
|
|
177
|
+
# to do. If either is missing, we need the generator (which is
|
|
178
|
+
# idempotent via if_not_exists on both tables).
|
|
179
|
+
return [] if table_exists?("pgbus_recurring_tasks") && table_exists?("pgbus_recurring_executions")
|
|
180
|
+
|
|
181
|
+
[:add_recurring]
|
|
182
|
+
end
|
|
183
|
+
|
|
184
|
+
def failed_events_index_migrations
|
|
185
|
+
# pgbus_failed_events is created by pgbus:install, but the
|
|
186
|
+
# unique (queue_name, msg_id) index was added later via its own
|
|
187
|
+
# generator. If the table exists without the unique index,
|
|
188
|
+
# FailedEventRecorder's upsert silently swallows ON CONFLICT
|
|
189
|
+
# errors — so this is a real bug waiting to bite.
|
|
190
|
+
return [] unless table_exists?("pgbus_failed_events")
|
|
191
|
+
return [] if index_exists?("pgbus_failed_events", "idx_pgbus_failed_events_queue_msg")
|
|
192
|
+
|
|
193
|
+
[:add_failed_events_index]
|
|
194
|
+
end
|
|
195
|
+
|
|
196
|
+
# --- schema probes -------------------------------------------------
|
|
197
|
+
|
|
198
|
+
def table_exists?(name)
|
|
199
|
+
connection.table_exists?(name)
|
|
200
|
+
rescue StandardError
|
|
201
|
+
false
|
|
202
|
+
end
|
|
203
|
+
|
|
204
|
+
def column_names(table)
|
|
205
|
+
connection.columns(table).map(&:name)
|
|
206
|
+
rescue StandardError
|
|
207
|
+
[]
|
|
208
|
+
end
|
|
209
|
+
|
|
210
|
+
def index_exists?(table, index_name)
|
|
211
|
+
connection.indexes(table).any? { |idx| idx.name == index_name }
|
|
212
|
+
rescue StandardError
|
|
213
|
+
false
|
|
214
|
+
end
|
|
215
|
+
end
|
|
216
|
+
end
|
|
217
|
+
end
|
data/lib/pgbus/version.rb
CHANGED
|
@@ -38,7 +38,10 @@ module Pgbus
|
|
|
38
38
|
# different connection lifecycle than the worker processes).
|
|
39
39
|
def queues_with_metrics
|
|
40
40
|
queue_names = connection.select_values("SELECT queue_name FROM pgmq.meta ORDER BY queue_name")
|
|
41
|
-
|
|
41
|
+
# paused_queue_names returns an Array; convert to Set so the
|
|
42
|
+
# per-queue membership check is O(1). With 100+ queues the
|
|
43
|
+
# Array#include? cost in the loop was O(n²) per dashboard load.
|
|
44
|
+
paused_queues = paused_queue_names.to_set
|
|
42
45
|
queue_names.map { |name| queue_metrics_via_sql(name) }.compact.map do |q|
|
|
43
46
|
q.merge(paused: paused_queues.include?(logical_queue_name(q[:name])))
|
|
44
47
|
end
|
|
@@ -263,11 +266,7 @@ module Pgbus
|
|
|
263
266
|
queues = queues_with_metrics.select { |q| q[:name].end_with?(dlq_suffix) }
|
|
264
267
|
offset = (page - 1) * per_page
|
|
265
268
|
|
|
266
|
-
|
|
267
|
-
query_queue_messages_raw(q[:name], per_page + offset, 0)
|
|
268
|
-
end
|
|
269
|
-
|
|
270
|
-
messages.sort_by { |m| -m[:msg_id].to_i }.slice(offset, per_page) || []
|
|
269
|
+
paginated_queue_messages(queues.map { |q| q[:name] }, per_page, offset)
|
|
271
270
|
rescue StandardError => e
|
|
272
271
|
Pgbus.logger.debug { "[Pgbus::Web] Error fetching DLQ messages: #{e.message}" }
|
|
273
272
|
[]
|
|
@@ -695,10 +694,41 @@ module Pgbus
|
|
|
695
694
|
def all_queue_messages(limit, offset)
|
|
696
695
|
dlq_suffix = Pgbus::DEAD_LETTER_SUFFIX
|
|
697
696
|
queues = queues_with_metrics.reject { |q| q[:name].end_with?(dlq_suffix) }
|
|
698
|
-
|
|
699
|
-
|
|
697
|
+
paginated_queue_messages(queues.map { |q| q[:name] }, limit, offset)
|
|
698
|
+
end
|
|
699
|
+
|
|
700
|
+
# Returns messages from multiple PGMQ queues in a single paginated
|
|
701
|
+
# query. Builds a UNION ALL across the target tables and pushes the
|
|
702
|
+
# ORDER BY msg_id DESC + LIMIT + OFFSET down to Postgres so we don't
|
|
703
|
+
# load (limit + offset) rows from every queue into Ruby just to slice
|
|
704
|
+
# out one page. Returns [] if queue_names is empty.
|
|
705
|
+
#
|
|
706
|
+
# Each UNION ALL fragment selects a literal queue name so the outer
|
|
707
|
+
# query can tag every row with its source queue — the pre-SQL
|
|
708
|
+
# implementation tagged it from the iteration variable. The queue
|
|
709
|
+
# name goes through sanitize_name (which calls QueueNameValidator)
|
|
710
|
+
# so it's safe to interpolate into both the schema-qualified table
|
|
711
|
+
# and the literal column. limit/offset are bound parameters.
|
|
712
|
+
def paginated_queue_messages(queue_names, limit, offset)
|
|
713
|
+
return [] if queue_names.empty?
|
|
714
|
+
|
|
715
|
+
sanitized = queue_names.map { |name| [name, sanitize_name(name)] }
|
|
716
|
+
fragments = sanitized.map do |(name, qtable)|
|
|
717
|
+
<<~SQL.strip
|
|
718
|
+
SELECT msg_id, read_ct, enqueued_at, last_read_at, vt, message, headers,
|
|
719
|
+
'#{name}' AS queue_name
|
|
720
|
+
FROM pgmq.q_#{qtable}
|
|
721
|
+
SQL
|
|
700
722
|
end
|
|
701
|
-
|
|
723
|
+
|
|
724
|
+
sql = <<~SQL
|
|
725
|
+
SELECT * FROM (#{fragments.join("\nUNION ALL\n")}) AS combined
|
|
726
|
+
ORDER BY msg_id DESC
|
|
727
|
+
LIMIT $1 OFFSET $2
|
|
728
|
+
SQL
|
|
729
|
+
|
|
730
|
+
rows = connection.select_all(sql, "Pgbus Paginated Queue Messages", [limit, offset])
|
|
731
|
+
rows.to_a.map { |r| format_message(r, r["queue_name"]) }
|
|
702
732
|
end
|
|
703
733
|
|
|
704
734
|
def queue_metrics_via_sql(queue_name)
|
|
@@ -903,12 +933,31 @@ module Pgbus
|
|
|
903
933
|
end
|
|
904
934
|
|
|
905
935
|
# Archive every queue message referenced by a failed_event row.
|
|
936
|
+
# Groups by queue and uses archive_batch so an N-event discard is
|
|
937
|
+
# one SQL statement per queue instead of N per-row roundtrips.
|
|
938
|
+
# Falls back to per-row archive inside a rescue if a batch fails,
|
|
939
|
+
# so one bad queue can't block progress on the others.
|
|
906
940
|
def archive_all_failed_messages
|
|
907
941
|
rows = connection.select_all(
|
|
908
942
|
"SELECT id, queue_name, msg_id FROM pgbus_failed_events WHERE msg_id IS NOT NULL",
|
|
909
943
|
"Pgbus Collect Failed Messages"
|
|
910
944
|
)
|
|
911
|
-
|
|
945
|
+
|
|
946
|
+
grouped = rows.to_a.group_by { |row| row["queue_name"] }
|
|
947
|
+
grouped.each do |queue_name, events|
|
|
948
|
+
msg_ids = events.filter_map { |row| row["msg_id"]&.to_i }
|
|
949
|
+
next if msg_ids.empty?
|
|
950
|
+
|
|
951
|
+
begin
|
|
952
|
+
@client.archive_batch(queue_name, msg_ids)
|
|
953
|
+
rescue StandardError => e
|
|
954
|
+
Pgbus.logger.debug do
|
|
955
|
+
"[Pgbus::Web] archive_batch failed for #{queue_name} (#{e.message}); " \
|
|
956
|
+
"falling back to per-row archive"
|
|
957
|
+
end
|
|
958
|
+
events.each { |row| archive_failed_message(row) }
|
|
959
|
+
end
|
|
960
|
+
end
|
|
912
961
|
rescue StandardError => e
|
|
913
962
|
Pgbus.logger.debug { "[Pgbus::Web] Error archiving failed messages: #{e.message}" }
|
|
914
963
|
end
|
metadata
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
|
2
2
|
name: pgbus
|
|
3
3
|
version: !ruby/object:Gem::Version
|
|
4
|
-
version: 0.6.0
|
|
4
|
+
version: 0.6.1
|
|
5
5
|
platform: ruby
|
|
6
6
|
authors:
|
|
7
7
|
- Mikael Henriksson
|
|
@@ -205,6 +205,7 @@ files:
|
|
|
205
205
|
- lib/generators/pgbus/add_job_locks_generator.rb
|
|
206
206
|
- lib/generators/pgbus/add_job_stats_generator.rb
|
|
207
207
|
- lib/generators/pgbus/add_job_stats_latency_generator.rb
|
|
208
|
+
- lib/generators/pgbus/add_job_stats_queue_index_generator.rb
|
|
208
209
|
- lib/generators/pgbus/add_outbox_generator.rb
|
|
209
210
|
- lib/generators/pgbus/add_presence_generator.rb
|
|
210
211
|
- lib/generators/pgbus/add_queue_states_generator.rb
|
|
@@ -216,6 +217,7 @@ files:
|
|
|
216
217
|
- lib/generators/pgbus/templates/add_job_locks.rb.erb
|
|
217
218
|
- lib/generators/pgbus/templates/add_job_stats.rb.erb
|
|
218
219
|
- lib/generators/pgbus/templates/add_job_stats_latency.rb.erb
|
|
220
|
+
- lib/generators/pgbus/templates/add_job_stats_queue_index.rb.erb
|
|
219
221
|
- lib/generators/pgbus/templates/add_outbox.rb.erb
|
|
220
222
|
- lib/generators/pgbus/templates/add_presence.rb.erb
|
|
221
223
|
- lib/generators/pgbus/templates/add_queue_states.rb.erb
|
|
@@ -255,6 +257,8 @@ files:
|
|
|
255
257
|
- lib/pgbus/event_bus/subscriber.rb
|
|
256
258
|
- lib/pgbus/failed_event_recorder.rb
|
|
257
259
|
- lib/pgbus/generators/config_converter.rb
|
|
260
|
+
- lib/pgbus/generators/database_target_detector.rb
|
|
261
|
+
- lib/pgbus/generators/migration_detector.rb
|
|
258
262
|
- lib/pgbus/instrumentation.rb
|
|
259
263
|
- lib/pgbus/outbox.rb
|
|
260
264
|
- lib/pgbus/outbox/poller.rb
|