postburner 1.0.0.pre.13 → 1.0.0.pre.14

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,153 +1,38 @@
-# Postburner Configuration Example
+# Postburner Configuration
 #
-# Copy this file to config/postburner.yml and customize for your environment.
+# See docs/configuration.md for advanced options including:
+# - Multiple workers with different concurrency profiles
+# - Fork/thread tuning for production deployments
+# - Scheduler configuration
 #
-# ## Named Workers Configuration
-#
-# Postburner uses named worker configurations to support different deployment patterns:
-# - Single worker: bin/postburner (auto-selects the single worker)
-# - Multiple workers: bin/postburner --worker <name> (must specify which worker)
-#
-# Each worker can have different fork/thread settings and process different queues.
-# This enables running different queue groups in separate OS processes with distinct
-# concurrency profiles.
-#
-# ## Puma-Style Architecture
-#
-# - **forks: 0** = Single process with thread pool (development/staging)
-# - **forks: 1+** = Multiple processes with thread pools (production)
-#
-# Scale by adjusting forks and threads per worker:
-# - Development: forks=0, threads=1 (single-threaded, easiest debugging)
-# - Staging: forks=0, threads=10 (multi-threaded, moderate load)
-# - Production: forks=4, threads=10 (40 concurrent jobs per worker)
+# Start the worker with:
+#   bin/postburner
+#   bin/postburner --worker default
+#   rake postburner:work
+#   rake postburner:work WORKER=default
 #
+# With a single worker defined, it's auto-selected. Add more workers under
+# `workers:` for different queue sets (see docs/configuration.md).
 
-default: &default
-  # Beanstalkd connection URL
-  # Override with ENV['BEANSTALK_URL'] if set
+shared: &shared
   beanstalk_url: <%= ENV['BEANSTALK_URL'] || 'beanstalk://localhost:11300' %>
-
-  # Scheduler configuration
-  # The scheduler runs as a lightweight "watchdog" job that checks for due
-  # schedule executions and enqueues them. All workers automatically watch
-  # the scheduler queue and will process the watchdog when it becomes due.
-  default_scheduler_interval: 300 # Check for due schedules every 5 minutes (300 seconds)
-  default_scheduler_priority: 100 # Scheduler watchdog priority (lower = higher priority)
-
-development:
-  <<: *default
-
   workers:
     default:
-      # Single-threaded, single process (simplest for debugging)
-      # If not specified, uses env-level defaults
      queues:
        - default
        - mailers
 
-test:
-  <<: *default
-
-  workers:
-    default:
-      # Test mode uses inline strategies automatically
-      queues:
-        - default
-
-staging: # <- environment config
-  <<: *default
-
-  # Env-level defaults (use default_ prefix)
-  default_threads: 10
-  default_gc_limit: 5000
-
-  workers: # <- worker config overrides
-    default:
-      # Multi-threaded, single process (moderate concurrency)
-      # Uses env-level defaults: default_threads=10, default_gc_limit=5000
-      queues:
-        - critical
-        - default
-        - mailers
+# Single process, single thread (forks: 0, threads: 1)
+development:
+  <<: *shared
 
-production: # <- environment config, i.e. defaults, NOT worker config
-  <<: *default
+# Inline execution (jobs run immediately in-process)
+test:
+  <<: *shared
 
-  # Env-level defaults (use default_ prefix)
+# Multi-process, multi-thread (forks: 2, threads: 4)
+production:
+  <<: *shared
   default_forks: 2
-  default_threads: 10
-  default_gc_limit: 5000
-
-  # Example 1: Single worker using env defaults
-  # Run: bin/postburner
-  #
-  # workers:
-  #   default:
-  #     # Uses default_forks=2, default_threads=10
-  #     queues:
-  #       - critical
-  #       - default
-  #       - mailers
-  #       - imports
-
-  # Example 2: Multiple workers with different concurrency profiles
-  # Run separate processes:
-  #   bin/postburner --worker imports (4 forks, 1 thread each)
-  #   bin/postburner --worker general (2 forks, 100 threads each)
-  #
-  workers: # <- worker config, i.e. overrides, NOT environment config
-    # Heavy, memory-intensive jobs - more processes, fewer threads
-    imports:
-      forks: 4 # Overrides default_forks
-      threads: 1 # Overrides default_threads
-      gc_limit: 500 # Overrides default_gc_limit
-      queues:
-        - imports
-        - data_processing
-
-    # General jobs - uses env defaults (forks=2, threads=10)
-    # Override threads for higher concurrency
-    general:
-      threads: 100 # Overrides default_threads (forks uses default_forks=2)
-      queues:
-        - default
-        - mailers
-        - notifications
-
-  # Example 3: Fine-grained control with multiple specialized workers
-  # Run separate processes:
-  #   bin/postburner --worker critical
-  #   bin/postburner --worker default
-  #   bin/postburner --worker mailers
-  #
-  # workers:
-  #   critical:
-  #     forks: 1
-  #     threads: 1
-  #     gc_limit: 100
-  #     queues:
-  #       - critical
-  #
-  #   default:
-  #     forks: 4
-  #     threads: 10
-  #     queues:
-  #       - default
-  #
-  #   mailers:
-  #     forks: 2
-  #     threads: 5
-  #     queues:
-  #       - mailers
-
-  # Env-Level Defaults (can be overridden per worker):
-  #
-  # default_queue: default # Default queue name (optional)
-  # default_priority: 65536 # Lower = higher priority (optional, 0 is highest)
-  # default_ttr: 300 # Time-to-run in seconds (optional)
-  # default_threads: 1 # Thread count per fork (optional, defaults to 1)
-  # default_forks: 0 # Fork count (optional, defaults to 0 = single process)
-  # default_gc_limit: nil # Exit after N jobs for restart (optional, nil = no limit)
-  # default_scheduler_interval: 300 # Scheduler check interval in seconds (optional, default: 300)
-  # default_scheduler_priority: 100 # Scheduler watchdog priority (optional, default: 100)
+  default_threads: 4
+  default_gc_limit: 64
@@ -152,6 +152,10 @@ module ActiveJob
     # Creates a Postburner::TrackedJob record, then queues minimal payload
     # to Beanstalkd with just the job ID reference.
     #
+    # Instruments with ActiveSupport::Notifications:
+    # - enqueue.job.postburner: When job is queued immediately
+    # - enqueue_at.job.postburner: When job is queued with delay
+    #
     # @param job [ActiveJob::Base] The job instance
     # @param timestamp [Time, nil] When to execute the job
     #
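
For orientation, the two events map onto the standard ActiveJob entry points; a minimal sketch (the `ReportJob` class below is hypothetical, not part of the gem):

    class ReportJob < ApplicationJob  # hypothetical example job
      queue_as :default

      def perform(report_id)
        # ...
      end
    end

    # Immediate enqueue -> emits 'enqueue.job.postburner'
    ReportJob.perform_later(42)

    # Delayed enqueue -> emits 'enqueue_at.job.postburner' (payload includes :scheduled_at)
    ReportJob.set(wait: 10.minutes).perform_later(42)
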
@@ -169,6 +173,7 @@ module ActiveJob
       delay = timestamp ? [timestamp.to_i - Time.current.to_i, 0].max : 0
 
       # Queue to Beanstalkd with minimal payload
+      bkid = nil
       Postburner.connected do |conn|
         tube_name = expand_tube_name(job.queue_name)
         opts = job_options(job)
@@ -183,6 +188,26 @@ module ActiveJob
         # Update tracked_job with Beanstalkd ID
         tracked_job.update_column(:bkid, bkid)
       end
+
+      # Instrument enqueue event
+      job_payload = Postburner::Instrumentation.job_payload_from_activejob(
+        job,
+        tracked: true,
+        postburner_job_id: tracked_job.id,
+        beanstalk_job_id: bkid
+      )
+      scheduled_at = timestamp ? Time.zone.at(timestamp) : nil
+
+      if scheduled_at && scheduled_at > Time.current
+        ActiveSupport::Notifications.instrument('enqueue_at.job.postburner', {
+          job: job_payload,
+          scheduled_at: scheduled_at
+        })
+      else
+        ActiveSupport::Notifications.instrument('enqueue.job.postburner', {
+          job: job_payload
+        })
+      end
     end
 
     # Enqueues a default job (Beanstalkd only, no PostgreSQL).
@@ -190,6 +215,10 @@ module ActiveJob
     # Queues full job data to Beanstalkd for fast execution without
     # PostgreSQL overhead.
     #
+    # Instruments with ActiveSupport::Notifications:
+    # - enqueue.job.postburner: When job is queued immediately
+    # - enqueue_at.job.postburner: When job is queued with delay
+    #
     # @param job [ActiveJob::Base] The job instance
     # @param timestamp [Time, nil] When to execute the job
     #
@@ -197,18 +226,39 @@ module ActiveJob
     #
     def enqueue_default(job, timestamp)
       delay = timestamp ? [timestamp.to_i - Time.current.to_i, 0].max : 0
+      bkid = nil
 
       Postburner.connected do |conn|
         tube_name = expand_tube_name(job.queue_name)
         opts = job_options(job)
 
-        conn.tubes[tube_name].put(
+        bkid = conn.tubes[tube_name].put(
           Postburner::ActiveJob::Payload.default_payload(job),
           pri: opts[:pri],
           delay: delay,
           ttr: opts[:ttr]
         )
       end
+
+      # Instrument enqueue event
+      job_payload = Postburner::Instrumentation.job_payload_from_activejob(
+        job,
+        tracked: false,
+        postburner_job_id: nil,
+        beanstalk_job_id: bkid
+      )
+      scheduled_at = timestamp ? Time.zone.at(timestamp) : nil
+
+      if scheduled_at && scheduled_at > Time.current
+        ActiveSupport::Notifications.instrument('enqueue_at.job.postburner', {
+          job: job_payload,
+          scheduled_at: scheduled_at
+        })
+      else
+        ActiveSupport::Notifications.instrument('enqueue.job.postburner', {
+          job: job_payload
+        })
+      end
     end
 
     # Expands queue name to full tube name with environment prefix.
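
The hash passed to subscribers under the `:job` key is built by `Postburner::Instrumentation.job_payload_from_activejob` (class, id, job_id, arguments, queue_name, beanstalk_job_id, tracked). A minimal subscriber sketch, assuming a Rails app; the log wording is illustrative only:

    # e.g. in an initializer (the location is an assumption, not prescribed by the gem)
    ActiveSupport::Notifications.subscribe(/enqueue(_at)?\.job\.postburner/) do |name, start, finish, id, payload|
      job = payload[:job]
      line = "#{job[:class]} queued on '#{job[:queue_name]}' " \
             "(beanstalk id #{job[:beanstalk_job_id]}, tracked=#{job[:tracked]})"
      line += " scheduled for #{payload[:scheduled_at]}" if payload[:scheduled_at]
      Rails.logger.info "[Postburner] #{line}"
    end
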
@@ -0,0 +1,196 @@
+# frozen_string_literal: true
+
+module Postburner
+  # Instrumentation helpers for ActiveSupport::Notifications events.
+  #
+  # Provides standardized payload builders for all Postburner instrumentation events.
+  # Event naming follows the pattern: `verb.noun.postburner`
+  #
+  # ## Event Categories
+  #
+  # ### Job Events
+  # - `perform_start.job.postburner` - Before job execution
+  # - `perform.job.postburner` - Around job execution (includes duration)
+  # - `retry.job.postburner` - Default job retried
+  # - `discard.job.postburner` - Default job exhausts retries
+  # - `enqueue.job.postburner` - Job queued (immediate)
+  # - `enqueue_at.job.postburner` - Job queued with delay
+  # - `retry_stopped.job.postburner` - Tracked job buried after failures
+  #
+  # ### Schedule Events
+  # - `create.schedule.postburner` - Schedule created
+  # - `update.schedule.postburner` - Schedule updated
+  # - `audit.schedule.postburner` - Scheduler audits a schedule
+  #
+  # ### Schedule Execution Events
+  # - `create.schedule_execution.postburner` - Execution created
+  # - `enqueue.schedule_execution.postburner` - Execution enqueued to Beanstalkd
+  # - `skip.schedule_execution.postburner` - Execution skipped
+  #
+  # ### Scheduler Watchdog Events
+  # - `perform_start.scheduler.postburner` - Watchdog run begins
+  # - `perform.scheduler.postburner` - Around watchdog run (summary)
+  #
+  # @example Subscribing to job events
+  #   ActiveSupport::Notifications.subscribe('perform.job.postburner') do |name, start, finish, id, payload|
+  #     duration = (finish - start) * 1000
+  #     Rails.logger.info "[Postburner] #{payload[:job][:class]} completed in #{duration.round(2)}ms"
+  #   end
+  #
+  # @example Subscribing to schedule events
+  #   ActiveSupport::Notifications.subscribe('create.schedule.postburner') do |*args|
+  #     payload = args.last
+  #     schedule = payload[:schedule]
+  #     Rails.logger.info "[Postburner] Schedule '#{schedule[:name]}' created"
+  #   end
+  #
+  module Instrumentation
+    module_function
+
+    # Build a job payload hash for instrumentation.
+    #
+    # @param job [Postburner::Job, Hash] Job instance or parsed payload hash
+    # @param beanstalk_job_id [Integer, nil] Beanstalkd job ID
+    # @return [Hash] Standardized job payload
+    #
+    def job_payload(job, beanstalk_job_id: nil)
+      if job.is_a?(Postburner::Job)
+        job_payload_from_model(job, beanstalk_job_id: beanstalk_job_id)
+      elsif job.is_a?(Hash)
+        job_payload_from_hash(job, beanstalk_job_id: beanstalk_job_id)
+      else
+        raise ArgumentError, "Expected Postburner::Job or Hash, got #{job.class}"
+      end
+    end
+
+    # Build job payload from a Postburner::Job model.
+    #
+    # @param job [Postburner::Job] Job model instance
+    # @param beanstalk_job_id [Integer, nil] Beanstalkd job ID
+    # @return [Hash] Standardized job payload
+    #
+    def job_payload_from_model(job, beanstalk_job_id: nil)
+      {
+        class: job.class.name,
+        id: job.id,
+        job_id: job.respond_to?(:args) && job.args.is_a?(Hash) ? job.args['job_id'] : nil,
+        arguments: job.args,
+        queue_name: job.respond_to?(:queue_name) ? job.queue_name : job.class.try(:postburner_queue),
+        beanstalk_job_id: beanstalk_job_id || job.bkid,
+        tracked: true
+      }
+    end
+
+    # Build job payload from a parsed Beanstalkd payload hash.
+    #
+    # Handles both ActiveJob format and legacy Postburner::Job format.
+    #
+    # @param payload [Hash] Parsed JSON payload from Beanstalkd
+    # @param beanstalk_job_id [Integer, nil] Beanstalkd job ID
+    # @return [Hash] Standardized job payload
+    #
+    def job_payload_from_hash(payload, beanstalk_job_id: nil)
+      if Postburner::ActiveJob::Payload.legacy_format?(payload)
+        # Legacy format: { "class" => "JobClass", "args" => [id] }
+        {
+          class: payload['class'],
+          id: payload['args']&.first,
+          job_id: nil,
+          arguments: payload['args'],
+          queue_name: nil,
+          beanstalk_job_id: beanstalk_job_id,
+          tracked: true
+        }
+      else
+        # ActiveJob format
+        tracked = payload['tracked'] == true
+        {
+          class: payload['job_class'],
+          id: tracked ? payload['postburner_job_id'] : nil,
+          job_id: payload['job_id'],
+          arguments: payload['arguments'],
+          queue_name: payload['queue_name'],
+          beanstalk_job_id: beanstalk_job_id,
+          tracked: tracked
+        }
+      end
+    end
+
+    # Build job payload from an ActiveJob instance.
+    #
+    # @param job [ActiveJob::Base] ActiveJob instance
+    # @param tracked [Boolean] Whether job is tracked in PostgreSQL
+    # @param postburner_job_id [Integer, nil] Postburner::TrackedJob ID if tracked
+    # @param beanstalk_job_id [Integer, nil] Beanstalkd job ID
+    # @return [Hash] Standardized job payload
+    #
+    def job_payload_from_activejob(job, tracked:, postburner_job_id: nil, beanstalk_job_id: nil)
+      {
+        class: job.class.name,
+        id: postburner_job_id,
+        job_id: job.job_id,
+        arguments: job.arguments,
+        queue_name: job.queue_name,
+        beanstalk_job_id: beanstalk_job_id,
+        tracked: tracked
+      }
+    end
+
+    # Build a schedule payload hash for instrumentation.
+    #
+    # @param schedule [Postburner::Schedule] Schedule model instance
+    # @return [Hash] Standardized schedule payload
+    #
+    def schedule_payload(schedule)
+      {
+        id: schedule.id,
+        name: schedule.name,
+        job_class: schedule.job_class,
+        enabled: schedule.enabled,
+        interval: schedule.interval,
+        interval_unit: schedule.interval_unit,
+        cron: schedule.cron,
+        timezone: schedule.timezone
+      }
+    end
+
+    # Build a schedule execution payload hash for instrumentation.
+    #
+    # @param execution [Postburner::ScheduleExecution] Execution model instance
+    # @return [Hash] Standardized execution payload
+    #
+    def execution_payload(execution)
+      {
+        id: execution.id,
+        schedule_id: execution.schedule_id,
+        run_at: execution.run_at,
+        next_run_at: execution.next_run_at,
+        status: execution.status,
+        beanstalk_job_id: execution.beanstalk_job_id,
+        job_id: execution.job_id
+      }
+    end
+
+    # Build changes hash from ActiveRecord model, excluding specified attributes.
+    #
+    # @param model [ActiveRecord::Base] Model with changes
+    # @param exclude [Array<String, Symbol>] Attributes to exclude
+    # @return [Hash] Changes hash in { attribute: [old, new] } format
+    #
+    def changes_payload(model, exclude: [])
+      exclude = exclude.map(&:to_s)
+      model.saved_changes.except(*exclude, 'updated_at', 'created_at')
+    end
+
+    # Instrument an event with ActiveSupport::Notifications.
+    #
+    # @param event [String] Event name (e.g., 'perform.job.postburner')
+    # @param payload [Hash] Event payload
+    # @yield Block to execute within instrumentation (for timing)
+    # @return [Object] Result of block if given
+    #
+    def instrument(event, payload = {}, &block)
+      ActiveSupport::Notifications.instrument(event, payload, &block)
+    end
+  end
+end
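
Because the module uses `module_function`, the helpers are callable directly on `Postburner::Instrumentation`. A hedged sketch of how internal code might combine a payload builder with the `instrument` wrapper (the surrounding locals are assumed and the block body is elided):

    # `tracked_job` is assumed to be a Postburner::Job; `bkid` an assumed Beanstalkd job ID
    payload = { job: Postburner::Instrumentation.job_payload(tracked_job, beanstalk_job_id: bkid) }

    Postburner::Instrumentation.instrument('perform.job.postburner', payload) do
      # ... run the job; ActiveSupport::Notifications times this block for subscribers
    end
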
@@ -123,6 +123,10 @@ module Postburner
     # 2. Ensure each schedule has a future execution queued
     # 3. Enqueue any orphaned pending executions that weren't properly queued
     #
+    # Instruments with ActiveSupport::Notifications:
+    # - perform_start.scheduler.postburner: When scheduler run begins
+    # - perform.scheduler.postburner: Around scheduler run (summary stats)
+    #
     # @return [void]
     #
     # @example Called by worker
@@ -134,16 +138,46 @@ module Postburner
     def perform
       logger.info "[Postburner::Scheduler] Starting scheduler run"
 
+      ActiveSupport::Notifications.instrument('perform_start.scheduler.postburner', {
+        interval: interval
+      })
+
+      # Track stats for summary event
+      @schedules_processed = 0
+      @schedules_failed = 0
+      @executions_created = 0
+      @orphans_enqueued = 0
+      lock_acquired = false
+
+      # Build payload hash that will be mutated with final stats
+      payload = {
+        interval: interval,
+        lock_acquired: nil,
+        schedules_processed: nil,
+        schedules_failed: nil,
+        executions_created: nil,
+        orphans_enqueued: nil
+      }
+
       # Use advisory lock to coordinate multiple workers
-      acquired = Postburner::AdvisoryLock.with_lock(AdvisoryLock::SCHEDULER_LOCK_KEY, blocking: false) do
-        process_all_schedules
-        true
-      end
+      ActiveSupport::Notifications.instrument('perform.scheduler.postburner', payload) do
+        lock_acquired = Postburner::AdvisoryLock.with_lock(AdvisoryLock::SCHEDULER_LOCK_KEY, blocking: false) do
+          process_all_schedules
+          true
+        end
 
-      if acquired
-        logger.info "[Postburner::Scheduler] Scheduler run complete"
-      else
-        logger.info "[Postburner::Scheduler] Could not acquire lock, skipping"
+        if lock_acquired
+          logger.info "[Postburner::Scheduler] Scheduler run complete"
+        else
+          logger.info "[Postburner::Scheduler] Could not acquire lock, skipping"
+        end
+
+        # Update payload with final stats (mutates the hash subscribers receive)
+        payload[:lock_acquired] = lock_acquired
+        payload[:schedules_processed] = @schedules_processed
+        payload[:schedules_failed] = @schedules_failed
+        payload[:executions_created] = @executions_created
+        payload[:orphans_enqueued] = @orphans_enqueued
       end
     ensure
       # Always re-queue watchdog for next run
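
Since the payload hash is mutated in place before the `instrument` block returns, subscribers to `perform.scheduler.postburner` see the final counters along with the block's duration. A minimal subscriber sketch (log wording is illustrative):

    ActiveSupport::Notifications.subscribe('perform.scheduler.postburner') do |name, start, finish, id, payload|
      duration_ms = ((finish - start) * 1000).round(1)
      if payload[:lock_acquired]
        Rails.logger.info "[Postburner] scheduler run: #{payload[:schedules_processed]} processed, " \
                          "#{payload[:schedules_failed]} failed, #{payload[:executions_created]} created, " \
                          "#{payload[:orphans_enqueued]} orphans enqueued (#{duration_ms}ms)"
      else
        Rails.logger.info "[Postburner] scheduler run skipped, lock not acquired (#{duration_ms}ms)"
      end
    end
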
@@ -297,21 +331,18 @@ module Postburner
     # @api private
     #
     def process_all_schedules
-      processed_count = 0
-      failed_count = 0
-
       Postburner::Schedule.enabled.find_each do |schedule|
         begin
           process_schedule(schedule)
-          processed_count += 1
+          @schedules_processed += 1
         rescue => e
           logger.error "[Postburner::Scheduler] Failed to process schedule '#{schedule.name}': #{e.class} - #{e.message}"
           logger.error e.backtrace.join("\n")
-          failed_count += 1
+          @schedules_failed += 1
         end
       end
 
-      logger.info "[Postburner::Scheduler] Processed #{processed_count} schedules, #{failed_count} failed"
+      logger.info "[Postburner::Scheduler] Processed #{@schedules_processed} schedules, #{@schedules_failed} failed"
     end
 
     # Process a single schedule.
@@ -325,31 +356,43 @@ module Postburner
     # This ensures newly created executions are available for processing in the
     # same run if they happen to be due.
     #
+    # Instruments with ActiveSupport::Notifications:
+    # - audit.schedule.postburner: When schedule is audited
+    #
     # @param schedule [Postburner::Schedule] The schedule to process
     # @return [void]
     #
     # @api private
     #
     def process_schedule(schedule)
+      bootstrapped = false
+      execution_created = false
+      orphans_count = 0
+
       # Auto-bootstrap: create first execution if schedule hasn't been started
       # This will create the execution AND enqueue it to Beanstalkd
       unless schedule.started?
         logger.info "[Postburner::Scheduler] Bootstrapping schedule '#{schedule.name}'"
         schedule.start!
+        bootstrapped = true
+        @executions_created += 1
       end
 
       # Safety net 1: Ensure schedule has a future execution
       # If missing, this will create AND enqueue it to Beanstalkd's delayed queue
-      ensure_future_execution!(schedule)
+      if ensure_future_execution!(schedule)
+        execution_created = true
+        @executions_created += 1
+      end
 
       # Safety net 2: Find any orphaned pending executions and enqueue them
       # This should rarely happen - only if enqueue! previously failed
-      execution_count = 0
       schedule.executions.due.find_each do |execution|
         begin
           logger.warn "[Postburner::Scheduler] Found orphaned pending execution #{execution.id} for schedule '#{schedule.name}', enqueuing"
           execution.enqueue!
-          execution_count += 1
+          orphans_count += 1
+          @orphans_enqueued += 1
         rescue => e
           logger.error "[Postburner::Scheduler] Failed to enqueue execution #{execution.id}: #{e.class} - #{e.message}"
           raise
@@ -359,7 +402,15 @@ module Postburner
       # Update last_audit_at
       schedule.update_column(:last_audit_at, Time.current)
 
-      logger.debug "[Postburner::Scheduler] Schedule '#{schedule.name}': enqueued #{execution_count} orphaned executions" if execution_count > 0
+      # Instrument audit event
+      ActiveSupport::Notifications.instrument('audit.schedule.postburner', {
+        schedule: Postburner::Instrumentation.schedule_payload(schedule),
+        bootstrapped: bootstrapped,
+        execution_created: execution_created,
+        orphans_enqueued: orphans_count
+      })
+
+      logger.debug "[Postburner::Scheduler] Schedule '#{schedule.name}': enqueued #{orphans_count} orphaned executions" if orphans_count > 0
     end
 
     # Ensure schedule has a future scheduled or pending execution.
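
The audit event combines the `schedule_payload` fields with the per-schedule flags set above; a short subscriber sketch (again, the logging is illustrative):

    ActiveSupport::Notifications.subscribe('audit.schedule.postburner') do |*, payload|
      schedule = payload[:schedule]
      Rails.logger.info "[Postburner] audited '#{schedule[:name]}' (#{schedule[:job_class]}): " \
                        "bootstrapped=#{payload[:bootstrapped]}, execution_created=#{payload[:execution_created]}, " \
                        "orphans_enqueued=#{payload[:orphans_enqueued]}"
    end
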
@@ -372,7 +423,7 @@ module Postburner
     # at least one future execution ready to run.
     #
     # @param schedule [Postburner::Schedule] The schedule to ensure future execution for
-    # @return [void]
+    # @return [Boolean] true if a new execution was created, false otherwise
     #
     # @api private
     #
@@ -385,7 +436,7 @@ module Postburner
         # but handle it as a safety net
         logger.warn "[Postburner::Scheduler] Schedule '#{schedule.name}' has no executions, bootstrapping"
         schedule.start!
-        return
+        return true
       end
 
       # Delegate to Schedule - it knows whether a future execution is needed
@@ -393,6 +444,9 @@ module Postburner
 
       if execution
         logger.info "[Postburner::Scheduler] Created future execution for '#{schedule.name}'"
+        true
+      else
+        false
       end
     end