prosody 0.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50):
  1. checksums.yaml +7 -0
  2. data/.cargo/config.toml +2 -0
  3. data/.release-please-manifest.json +3 -0
  4. data/.rspec +3 -0
  5. data/.ruby-version +1 -0
  6. data/.standard.yml +9 -0
  7. data/.taplo.toml +6 -0
  8. data/ARCHITECTURE.md +591 -0
  9. data/CHANGELOG.md +92 -0
  10. data/Cargo.lock +3513 -0
  11. data/Cargo.toml +77 -0
  12. data/LICENSE +21 -0
  13. data/Makefile +36 -0
  14. data/README.md +946 -0
  15. data/Rakefile +26 -0
  16. data/ext/prosody/Cargo.toml +38 -0
  17. data/ext/prosody/extconf.rb +6 -0
  18. data/ext/prosody/src/admin.rs +171 -0
  19. data/ext/prosody/src/bridge/callback.rs +60 -0
  20. data/ext/prosody/src/bridge/mod.rs +332 -0
  21. data/ext/prosody/src/client/config.rs +819 -0
  22. data/ext/prosody/src/client/mod.rs +379 -0
  23. data/ext/prosody/src/gvl.rs +149 -0
  24. data/ext/prosody/src/handler/context.rs +436 -0
  25. data/ext/prosody/src/handler/message.rs +144 -0
  26. data/ext/prosody/src/handler/mod.rs +338 -0
  27. data/ext/prosody/src/handler/trigger.rs +93 -0
  28. data/ext/prosody/src/lib.rs +82 -0
  29. data/ext/prosody/src/logging.rs +353 -0
  30. data/ext/prosody/src/scheduler/cancellation.rs +67 -0
  31. data/ext/prosody/src/scheduler/handle.rs +50 -0
  32. data/ext/prosody/src/scheduler/mod.rs +169 -0
  33. data/ext/prosody/src/scheduler/processor.rs +166 -0
  34. data/ext/prosody/src/scheduler/result.rs +197 -0
  35. data/ext/prosody/src/tracing_util.rs +56 -0
  36. data/ext/prosody/src/util.rs +219 -0
  37. data/lib/prosody/configuration.rb +333 -0
  38. data/lib/prosody/handler.rb +177 -0
  39. data/lib/prosody/native_stubs.rb +417 -0
  40. data/lib/prosody/processor.rb +321 -0
  41. data/lib/prosody/sentry.rb +36 -0
  42. data/lib/prosody/version.rb +10 -0
  43. data/lib/prosody.rb +42 -0
  44. data/release-please-config.json +10 -0
  45. data/sig/configuration.rbs +252 -0
  46. data/sig/handler.rbs +79 -0
  47. data/sig/processor.rbs +100 -0
  48. data/sig/prosody.rbs +171 -0
  49. data/sig/version.rbs +9 -0
  50. metadata +193 -0
@@ -0,0 +1,321 @@
1
+ # frozen_string_literal: true
2
+
3
+ require "async"
4
+ require "async/barrier"
5
+ require "opentelemetry-api"
6
+ require "prosody/version"
7
+
8
+ module Prosody
9
+ # Provides a mechanism for canceling asynchronous tasks.
10
+ #
11
+ # This class implements a simple cancellation mechanism using a Ruby Queue,
12
+ # allowing tasks to be safely canceled while they're in progress. Each token
13
+ # maintains its own queue for signaling cancellation.
14
# Thread-safe, one-shot signal used to request cancellation of an async task.
#
# Backed by a Ruby Queue so that one thread can signal (#cancel) while
# another blocks (#wait) until the signal arrives. Each token owns its own
# channel, so tokens are independent of one another.
class CancellationToken
  # Sets up the internal signal channel.
  def initialize
    @signal = Queue.new
  end

  # Requests cancellation by pushing a marker onto the channel, waking any
  # thread currently blocked in #wait.
  def cancel
    @signal << :cancel
  end

  # Blocks the calling thread until #cancel has been invoked from another
  # thread.
  #
  # @return [Boolean] always true once the cancellation signal is received
  def wait
    @signal.pop
    true
  end
end
39
+
40
+ # Contains command classes for the AsyncTaskProcessor's command queue.
41
+ #
42
+ # This module implements a command pattern for communication with the processor
43
+ # thread, allowing for type-safe message passing between threads.
44
# Command objects exchanged with the AsyncTaskProcessor over its queue.
#
# Implements a small command pattern so that threads can pass type-safe
# messages to the processor loop.
module Commands
  # Common ancestor for every processor command; used for type identification
  # when commands are popped off the queue.
  class Command; end

  # Carries everything needed to run one asynchronous task in the Ruby runtime.
  class Execute < Command
    # @return [String] task identifier used in logs and debugging
    attr_reader :task_id
    # @return [Hash] OpenTelemetry context carrier for trace propagation
    attr_reader :carrier
    # @return [Hash] structured event fields for error reporting
    attr_reader :event_context
    # @return [Proc] the work to run
    attr_reader :block
    # @return [Proc] invoked with (success, result) when execution finishes
    attr_reader :callback
    # @return [CancellationToken] cancels this task when signaled
    attr_reader :token

    # Builds an execute command from all required parameters.
    #
    # @param task_id [String] unique identifier for the task
    # @param carrier [Hash] OpenTelemetry context carrier with trace information
    # @param event_context [Hash] structured event fields for error reporting
    # @param block [Proc] the code to execute
    # @param callback [Proc] called with (success, result) when complete
    # @param token [CancellationToken] token that can cancel execution
    def initialize(task_id, carrier, event_context, block, callback, token)
      @task_id, @carrier, @event_context = task_id, carrier, event_context
      @block, @callback, @token = block, callback, token
    end
  end

  # Tells the processor to finish all in-flight work and then shut down.
  class Shutdown < Command; end
end
98
+
99
+ # Processes asynchronous tasks in a dedicated thread with OpenTelemetry tracing.
100
+ #
101
+ # This processor manages a dedicated Ruby thread that executes tasks asynchronously
102
+ # with proper OpenTelemetry context propagation. It provides:
103
+ #
104
+ # - Task submission and execution in an isolated thread
105
+ # - Cancellation support for in-flight tasks
106
+ # - Context propagation for distributed tracing
107
+ # - Graceful shutdown with task completion
108
# Processes asynchronous tasks in a dedicated thread with OpenTelemetry tracing.
#
# This processor manages a dedicated Ruby thread that executes tasks asynchronously
# with proper OpenTelemetry context propagation. It provides:
#
# - Task submission and execution in an isolated thread
# - Cancellation support for in-flight tasks
# - Context propagation for distributed tracing
# - Graceful shutdown with task completion
class AsyncTaskProcessor
  # Creates a new processor with the given logger.
  #
  # @param logger [Logger] Logger for diagnostic messages (defaults to Prosody.logger)
  def initialize(logger = Prosody.logger)
    @logger = logger
    # Thread-safe channel carrying Commands::* objects from callers to the
    # processing thread.
    @command_queue = Queue.new
    # Created lazily by #start; nil (or a dead thread) means "not running".
    @processing_thread = nil
    # Initialized inside the processing thread (see #start) so OpenTelemetry
    # state never crosses a thread boundary.
    @tracer = nil
  end

  # Starts the processor by launching a dedicated thread.
  #
  # The OpenTelemetry tracer is initialized in the processing thread
  # to avoid crossing thread boundaries. Does nothing if the processor
  # is already running.
  def start
    return if running?

    @logger.debug("Starting async task processor")
    @processing_thread = Thread.new do
      # Initialize the tracer in the processing thread to keep
      # OpenTelemetry context within the same thread
      @tracer = OpenTelemetry.tracer_provider.tracer(
        "Prosody::AsyncTaskProcessor",
        Prosody::VERSION
      )
      process_commands
    end
  end

  # Gracefully stops the processor.
  #
  # Tasks in progress will complete before the processor fully shuts down.
  # Does nothing if the processor is already stopped.
  #
  # NOTE(review): this only enqueues the Shutdown command; it does not join
  # @processing_thread, so the thread may still be draining tasks when this
  # method returns — confirm callers do not expect a synchronous stop.
  def stop
    return unless running?

    @logger.debug("Stopping async task processor")
    @command_queue.push(Commands::Shutdown.new)
  end

  # Submits a task for asynchronous execution.
  #
  # @param task_id [String] Unique identifier for the task
  # @param carrier [Hash] OpenTelemetry context carrier for tracing
  # @param event_context [Hash] Structured event fields for error reporting;
  #   keys are symbolized here. NOTE(review): a nil event_context raises
  #   NoMethodError — confirm callers always pass a Hash.
  # @param callback [Proc] Called with (success, result) when task completes
  # @yield The block to execute asynchronously
  # @return [CancellationToken] Token that can be used to cancel the task
  def submit(task_id, carrier, event_context, callback, &task_block)
    token = CancellationToken.new
    @command_queue.push(
      Commands::Execute.new(task_id, carrier, event_context.transform_keys(&:to_sym), task_block, callback, token)
    )
    token
  end

  private

  # Checks if the processor thread is running.
  #
  # @return [Boolean] true if the processor is running, false otherwise
  def running?
    @processing_thread&.alive?
  end

  # Main processing loop for the async thread.
  #
  # Uses the async gem to handle concurrent task execution and tracks
  # active tasks with a barrier for clean shutdown. Any StandardError that
  # escapes the loop is logged and swallowed, so the processing thread
  # terminates quietly rather than re-raising.
  def process_commands
    Async do
      # Barrier tracks all running tasks for clean shutdown
      barrier = Async::Barrier.new

      loop do
        # Blocks until a command arrives from a caller thread.
        command = @command_queue.pop

        case command
        when Commands::Execute
          handle_execute(command, barrier)
        when Commands::Shutdown
          @logger.debug("Received shutdown command")
          # Wait for all tasks to complete before shutting down
          barrier.wait
          break
        else
          @logger.warn("Unknown command type: #{command.class}")
        end
      end
    end
  rescue => e
    @logger.error("Error in process_commands: #{e.message}")
    @logger.error(e.backtrace.join("\n"))
  end

  # Handles execution of a task with proper context propagation and error handling.
  #
  # @param command [Commands::Execute] The command containing task details
  # @param barrier [Async::Barrier] Barrier for tracking active tasks
  def handle_execute(command, barrier)
    task_id = command.task_id
    carrier = command.carrier
    event_context = command.event_context
    token = command.token
    callback = command.callback
    task_block = command.block

    # Extract parent context from the incoming carrier for distributed tracing
    parent_ctx = OpenTelemetry.propagation.extract(carrier)

    # Create the dispatch span as a child of the extracted context, then
    # capture the resulting context so it can be explicitly restored inside
    # the worker fiber. The span is owned by run_with_cancellation, which
    # finishes it in ensure.
    dispatch_ctx = OpenTelemetry::Context.with_current(parent_ctx) do
      span = @tracer.start_span("async_dispatch", kind: :consumer)
      OpenTelemetry::Trace.with_span(span) { OpenTelemetry::Context.current }
    end

    @logger.debug("Executing task #{task_id}")

    begin
      barrier.async do
        run_with_cancellation(task_id, token, task_block, callback, dispatch_ctx, event_context)
      end
    rescue => e
      # If we failed to enqueue, finish the span here since run_with_cancellation
      # will never take ownership.
      OpenTelemetry::Trace.current_span(dispatch_ctx).finish
      raise e
    end
  end

  # Executes a task with proper cancellation support.
  #
  # Spawns a worker task and a cancellation watcher within a barrier. When
  # cancellation is signaled, the worker receives Async::Stop (similar to
  # Python's asyncio.CancelledError). The worker can catch Async::Stop for
  # cleanup. The barrier ensures both tasks are cleaned up when the block
  # exits, even if an unexpected error occurs.
  #
  # @param task_id [String] The task identifier for logging
  # @param token [CancellationToken] The token to monitor for cancellation
  # @param task_block [Proc] The work to execute
  # @param callback [Proc] The callback to notify of completion or error
  # @param dispatch_ctx [OpenTelemetry::Context] Context with async_dispatch span active
  # @param event_context [Hash] Structured event fields merged into Sentry reports
  def run_with_cancellation(task_id, token, task_block, callback, dispatch_ctx, event_context)
    span = OpenTelemetry::Trace.current_span(dispatch_ctx)
    # Use a barrier to ensure both tasks are cleaned up when block exits
    barrier = Async::Barrier.new

    # Spawn worker task - handles its own result reporting
    worker_task = barrier.async do |task|
      task.annotate("Worker for task #{task_id}")
      # Async fibers do not inherit fiber-local OTel context from their parent,
      # so we must explicitly restore dispatch_ctx so user spans are children
      # of async_dispatch.
      OpenTelemetry::Context.with_current(dispatch_ctx) do
        result = task_block.call
        # The callback's truthy return only gates logging, not control flow.
        if callback.call(true, result)
          @logger.debug("Task #{task_id} completed successfully")
        end
      rescue Async::Stop
        # Task was cancelled - report via callback
        if callback.call(false, RuntimeError.new("Task cancelled"))
          @logger.debug("Task #{task_id} was cancelled")
        end
      rescue => e
        Prosody::SentryIntegration.capture_exception(e, event_context.merge(task_id: task_id))
        if callback.call(false, e)
          @logger.error("Error executing task #{task_id}: #{e.message}")
          span.record_exception(e)
          span.status = OpenTelemetry::Trace::Status.error(e.to_s)
        end
      ensure
        # Always signal the cancellation watcher to stop waiting
        token.cancel
      end
    end

    # Spawn cancellation watcher - bridges the thread-boundary cancellation signal
    # into the fiber scheduler. The CancellationToken is a one-shot channel: the
    # Rust bridge pushes a signal from its thread, and this fiber pops it and
    # translates it into Async::Stop on the worker. We can't call worker_task.stop
    # directly from the Rust thread since Async task control must happen on the
    # scheduler thread.
    barrier.async do |task|
      task.annotate("Cancellation watcher for task #{task_id}")
      begin
        token.wait
        worker_task.stop
      rescue => e
        @logger.debug("Cancellation watcher error: #{e.message}")
        span.record_exception(e)
        span.status = OpenTelemetry::Trace::Status.error(e.to_s)
      end
    end

    # Wait for worker to complete (normally, via Async::Stop, or with error)
    worker_task.wait
  ensure
    # Stop any remaining tasks (primarily the cancellation watcher).
    # Finish the span last so it covers the full execution, and is guaranteed
    # to close even if barrier.stop raises.
    begin
      barrier&.stop
    ensure
      span.finish
    end
  end
end
321
+ end
@@ -0,0 +1,36 @@
1
+ # frozen_string_literal: true
2
+
3
module Prosody
  # Optional bridge to the sentry-ruby gem for error reporting.
  #
  # Every entry point degrades gracefully: when sentry-ruby is absent or no
  # DSN is configured, reporting is silently disabled (with a single warning
  # logged when a DSN is present but the gem is missing).
  module SentryIntegration
    # Whether Sentry reporting is currently possible.
    #
    # Lazily initializes the Sentry SDK from SENTRY_DSN on first use.
    #
    # @return [Boolean] true when ::Sentry is loaded and initialized
    def self.enabled?
      if ENV["SENTRY_DSN"] && !defined?(::Sentry)
        warn_missing_gem
        return false
      end

      return false unless defined?(::Sentry)

      unless ::Sentry.initialized?
        return false unless ENV["SENTRY_DSN"]

        ::Sentry.init { |c| c.dsn = ENV["SENTRY_DSN"] }
      end

      ::Sentry.initialized?
    end

    # Reports an exception to Sentry with Prosody-specific context.
    #
    # No-op when reporting is disabled (see .enabled?).
    #
    # @param exception [Exception] the error to report
    # @param context [Hash] structured fields attached under the "prosody"
    #   scope context; a :event_type key additionally becomes a tag
    def self.capture_exception(exception, context = {})
      return unless enabled?

      ::Sentry.with_scope do |scope|
        scope.set_context("prosody", context)
        event_type = context[:event_type]
        scope.set_tag("prosody.event_type", event_type.to_s) if event_type
        ::Sentry.capture_exception(exception)
      end
    end

    # Logs (exactly once per process) that a DSN is configured but the
    # sentry-ruby gem is unavailable.
    def self.warn_missing_gem
      return if @warned_missing_gem

      @warned_missing_gem = true
      Prosody.logger.error("SENTRY_DSN is set but sentry-ruby is not installed. Add `gem 'sentry-ruby'` to your Gemfile.")
    end
    private_class_method :warn_missing_gem
  end
end
@@ -0,0 +1,10 @@
1
+ # frozen_string_literal: true
2
+
3
module Prosody
  # Current version of the Prosody library.
  #
  # This version number follows semantic versioning and is used by the
  # gem system to identify the library version. It should be updated
  # according to semver guidelines when making releases.
  #
  # Also reported as the tracer version when AsyncTaskProcessor creates
  # its OpenTelemetry tracer.
  VERSION = "0.1.1"
end
data/lib/prosody.rb ADDED
@@ -0,0 +1,42 @@
1
+ # frozen_string_literal: true
2
+
3
+ # The Prosody gem provides a Ruby interface for the Prosody event processing system.
4
+ # It implements a high-level client for working with Kafka message streams, with
5
+ # support for both producing and consuming messages in an idiomatic Ruby way.
6
+ #
7
+ # This library wraps a native Rust implementation for high performance while
8
+ # providing a comfortable Ruby API, with features including:
9
+ # - Configuration with Ruby-friendly syntax
10
+ # - Handler classes for processing messages
11
+ # - Async/non-blocking processing
12
+ # - OpenTelemetry integration for distributed tracing
13
+ # - Automatic error classification and retry logic
14
+
15
+ require "async"
16
+ require "logger"
17
+ require_relative "prosody/version"
18
+ require_relative "prosody/configuration"
19
+ require_relative "prosody/handler"
20
+ require_relative "prosody/processor"
21
+ require_relative "prosody/sentry"
22
+ require_relative "prosody/native_stubs" if defined?(Prosody::Client)
23
+
24
module Prosody
  class << self
    # Replaces the library-wide logger (e.g. to route into an application's
    # logging stack).
    attr_writer :logger

    # Library-wide logger, lazily created on first access.
    #
    # @return [Logger] the configured logger, or a default INFO-level
    #   logger writing to $stdout
    def logger
      @logger ||= begin
        default = Logger.new($stdout)
        default.level = Logger::INFO
        default
      end
    end
  end
end
33
+
34
# Attempt to load the native extension built for the current Ruby minor
# version first (e.g. "prosody/3.2/prosody"), falling back to the generic
# build if no version-specific one is available. This allows shipping
# optimized builds targeting specific Ruby versions.
begin
  # Extract "MAJOR.MINOR" from RUBY_VERSION (e.g. "3.2" from "3.2.1").
  # String#[] with a Regexp returns the matched substring directly, instead of
  # the previous approach of interpolating a MatchData object and relying on
  # its implicit #to_s.
  ruby_minor = RUBY_VERSION[/\d+\.\d+/]
  require_relative "prosody/#{ruby_minor}/prosody"
rescue LoadError
  require_relative "prosody/prosody"
end
@@ -0,0 +1,10 @@
1
+ {
2
+ "$schema": "https://raw.githubusercontent.com/googleapis/release-please/main/schemas/config.json",
3
+ "packages": {
4
+ ".": {
5
+ "release-type": "ruby",
6
+ "package-name": "prosody",
7
+ "version-file": "lib/prosody/version.rb"
8
+ }
9
+ }
10
+ }
@@ -0,0 +1,252 @@
1
# RBS type signatures for Prosody::Configuration (lib/prosody/configuration.rb).
# Getters return nil when a parameter is unset; setters run the declared
# conversion and return the converted value.
module Prosody
  # Configuration for the Prosody messaging client with type validation
  class Configuration
    private

    # Internal configuration store
    @config: Hash[Symbol, untyped]

    public

    # Define a typed configuration parameter with optional conversion and default
    #
    # @param name Configuration parameter name
    # @param converter Function to validate and convert input values
    # @param default Value returned when parameter is unset
    def self.config_param: (Symbol | String name, ?converter: ^(untyped) -> untyped, ?default: untyped) -> void

    # Convert duration values to floating point seconds
    #
    # @param v Duration value to convert
    # @raise ArgumentError when value is not a valid duration
    def self.duration_converter: (Numeric | untyped) -> Float

    # Create a new configuration with initial values
    #
    # @param kwargs Initial configuration values
    def initialize: (?Hash[Symbol, untyped] kwargs) ?{ (Configuration) -> void } -> void

    # Kafka bootstrap server addresses
    def bootstrap_servers: () -> Array[String]?
    def bootstrap_servers=: (String | _ToA[String]) -> Array[String]

    # Kafka topics to subscribe to
    def subscribed_topics: () -> Array[String]?
    def subscribed_topics=: (String | _ToA[String]) -> Array[String]

    # Event types the consumer is allowed to process
    def allowed_events: () -> Array[String]?
    def allowed_events=: (String | _ToA[String]) -> Array[String]

    # Use mock Kafka implementation for testing
    def mock: () -> bool?
    def mock=: (untyped) -> bool

    # Maximum time to wait for a send operation (seconds)
    def send_timeout: () -> Float?
    def send_timeout=: (Numeric) -> Float

    # Time threshold for detecting stalled consumers (seconds)
    def stall_threshold: () -> Float?
    def stall_threshold=: (Numeric) -> Float

    # Maximum time to wait for clean shutdown (seconds)
    def shutdown_timeout: () -> Float?
    def shutdown_timeout=: (Numeric) -> Float

    # Interval between Kafka poll operations (seconds)
    def poll_interval: () -> Float?
    def poll_interval=: (Numeric) -> Float

    # Interval between offset commit operations (seconds)
    def commit_interval: () -> Float?
    def commit_interval=: (Numeric) -> Float

    # Base delay for retry operations (seconds)
    def retry_base: () -> Float?
    def retry_base=: (Numeric) -> Float

    # Maximum delay between retries (seconds)
    def max_retry_delay: () -> Float?
    def max_retry_delay=: (Numeric) -> Float

    # Size of the cache used for message idempotence
    def idempotence_cache_size: () -> Integer?
    def idempotence_cache_size=: (Integer | _ToInt) -> Integer

    # Version string for cache-busting deduplication hashes
    def idempotence_version: () -> String?
    def idempotence_version=: (_ToS) -> String

    # TTL for deduplication records in Cassandra (in seconds)
    def idempotence_ttl: () -> Float?
    def idempotence_ttl=: (Numeric) -> Float

    # Maximum number of concurrent message processing tasks
    def max_concurrency: () -> Integer?
    def max_concurrency=: (Integer | _ToInt) -> Integer

    # Maximum messages to process before committing offsets
    def max_uncommitted: () -> Integer?
    def max_uncommitted=: (Integer | _ToInt) -> Integer

    # Maximum number of retry attempts
    def max_retries: () -> Integer?
    def max_retries=: (Integer | _ToInt) -> Integer

    # Kafka consumer group ID
    def group_id: () -> String?
    def group_id=: (_ToS) -> String

    # Identifier for the system producing messages
    def source_system: () -> String?
    def source_system=: (_ToS) -> String

    # Topic to send failed messages to
    def failure_topic: () -> String?
    def failure_topic=: (_ToS) -> String

    # Operation mode: :pipeline, :low_latency, or :best_effort
    def mode: () -> Symbol?
    def mode=: (Symbol | String) -> Symbol

    # Health probe port configuration (port number, false, or nil)
    def probe_port: () -> (Integer | false | nil)
    def probe_port=: (Integer | Symbol | String | false | nil) -> (Integer | false | nil)

    # Cassandra nodes (hostnames or IPs)
    def cassandra_nodes: () -> Array[String]?
    def cassandra_nodes=: (String | _ToA[String]) -> Array[String]

    # Cassandra keyspace name
    def cassandra_keyspace: () -> String?
    def cassandra_keyspace=: (_ToS) -> String

    # Cassandra datacenter for query routing
    def cassandra_datacenter: () -> String?
    def cassandra_datacenter=: (_ToS) -> String

    # Cassandra rack for topology-aware routing
    def cassandra_rack: () -> String?
    def cassandra_rack=: (_ToS) -> String

    # Cassandra authentication user
    def cassandra_user: () -> String?
    def cassandra_user=: (_ToS) -> String

    # Cassandra authentication password
    def cassandra_password: () -> String?
    def cassandra_password=: (_ToS) -> String

    # Cassandra data retention period (seconds)
    def cassandra_retention: () -> Float?
    def cassandra_retention=: (Numeric) -> Float

    # Timer slab partitioning duration (seconds)
    def slab_size: () -> Float?
    def slab_size=: (Numeric) -> Float

    # Scheduler: failure/retry task weight (0.0 to 1.0)
    def scheduler_failure_weight: () -> Float?
    def scheduler_failure_weight=: (Numeric) -> Float

    # Scheduler: max wait for urgency boost (seconds)
    def scheduler_max_wait: () -> Float?
    def scheduler_max_wait=: (Numeric) -> Float

    # Scheduler: wait time priority weight
    def scheduler_wait_weight: () -> Float?
    def scheduler_wait_weight=: (Numeric) -> Float

    # Scheduler: cache size for per-key virtual time
    def scheduler_cache_size: () -> Integer?
    def scheduler_cache_size=: (Integer | _ToInt) -> Integer

    # Monopolization detection enabled
    def monopolization_enabled: () -> bool?
    def monopolization_enabled=: (untyped) -> bool

    # Monopolization detection threshold (0.0 to 1.0)
    def monopolization_threshold: () -> Float?
    def monopolization_threshold=: (Numeric) -> Float

    # Monopolization detection window (seconds)
    def monopolization_window: () -> Float?
    def monopolization_window=: (Numeric) -> Float

    # Monopolization cache size
    def monopolization_cache_size: () -> Integer?
    def monopolization_cache_size=: (Integer | _ToInt) -> Integer

    # Deferral enabled
    def defer_enabled: () -> bool?
    def defer_enabled=: (untyped) -> bool

    # Defer base backoff delay (seconds)
    def defer_base: () -> Float?
    def defer_base=: (Numeric) -> Float

    # Defer maximum delay (seconds)
    def defer_max_delay: () -> Float?
    def defer_max_delay=: (Numeric) -> Float

    # Defer failure threshold (0.0 to 1.0)
    def defer_failure_threshold: () -> Float?
    def defer_failure_threshold=: (Numeric) -> Float

    # Defer failure window (seconds)
    def defer_failure_window: () -> Float?
    def defer_failure_window=: (Numeric) -> Float

    # Defer cache size
    def defer_cache_size: () -> Integer?
    def defer_cache_size=: (Integer | _ToInt) -> Integer

    # Defer seek timeout (seconds)
    def defer_seek_timeout: () -> Float?
    def defer_seek_timeout=: (Numeric) -> Float

    # Defer discard threshold (messages)
    def defer_discard_threshold: () -> Integer?
    def defer_discard_threshold=: (Integer | _ToInt) -> Integer

    # Handler execution timeout (seconds)
    def timeout: () -> Float?
    def timeout=: (Numeric) -> Float

    # Kafka topic to produce internal telemetry events to
    def telemetry_topic: () -> String?
    def telemetry_topic=: (_ToS) -> String

    # Whether the telemetry emitter is enabled
    def telemetry_enabled: () -> bool?
    def telemetry_enabled=: (untyped) -> bool

    # Span linking for message execution spans ('child' or 'follows_from')
    def message_spans: () -> String?
    def message_spans=: (_ToS) -> String

    # Span linking for timer execution spans ('child' or 'follows_from')
    def timer_spans: () -> String?
    def timer_spans=: (_ToS) -> String

    # Convert configuration to a hash with non-nil values only
    def to_hash: () -> Hash[Symbol, untyped]
  end

  # Helper interface for objects convertible to array
  interface _ToA[T]
    def to_a: () -> Array[T]
  end

  # Helper interface for objects convertible to string
  interface _ToS
    def to_s: () -> String
  end

  # Helper interface for objects convertible to integer
  interface _ToInt
    def to_int: () -> Integer
  end
end