natswork-client 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,7 @@
+ # frozen_string_literal: true
+
+ module NatsWork
+   class Client
+     VERSION = '0.0.1'
+   end
+ end
@@ -0,0 +1,397 @@
+ # frozen_string_literal: true
+
+ require 'singleton'
+ require 'securerandom'
+ require 'timeout'
+ require 'natswork/client/version'
+ require 'natswork/errors'
+ require 'natswork/serializer'
+ require 'natswork/message'
+ require 'natswork/job'
+ require 'natswork/connection'
+ require 'natswork/connection_pool'
+ require 'natswork/configuration'
+ require 'natswork/jetstream_manager'
+ require 'natswork/circuit_breaker'
+
+ module NatsWork
+   class Client
+     include Singleton
+
+     attr_reader :connection_pool, :jetstream_manager, :configuration
+
+     def initialize
+       @configuration = Configuration.instance
+       @connection_pool = nil
+       @jetstream_manager = nil
+       @mutex = Mutex.new
+       @result_store = {}
+       @result_callbacks = {}
+       @scheduled_jobs = []
+       @result_expiration_times = {}
+       start_result_expiration_thread
+     end
+
+     def configure
+       yield @configuration if block_given?
+       reset_connection!
+     end
+
+     def reset_connection!
+       shutdown_existing_connections
+       @connection_pool = ConnectionPool.new(
+         size: @configuration.pool_size || 5,
+         timeout: @configuration.pool_timeout || 5,
+         connection_options: {
+           servers: @configuration.servers || ['nats://localhost:4222'],
+           max_reconnect_attempts: @configuration.max_reconnect_attempts || 10,
+           reconnect_time_wait: @configuration.reconnect_time_wait || 2
+         }
+       )
+       # JetStreamManager will be created lazily when needed
+       ensure_streams_exist
+     end
+
+     # Connection management
+     def self.start
+       instance.start_connection
+     end
+
+     def self.stop
+       instance.stop_connection
+     end
+
+     # Main API for job dispatching
+     def self.push(options)
+       instance.push(options)
+     end
+
+     def self.perform_sync(options)
+       instance.perform_sync(options)
+     end
+
+     def self.perform_async(options)
+       instance.push(options)
+     end
+
+     def self.perform_in(delay, options)
+       instance.perform_in(delay, options)
+     end
+
+     def self.perform_at(time, options)
+       instance.perform_at(time, options)
+     end
+
+     def self.batch(jobs)
+       instance.batch(jobs)
+     end
+
+     # Instance methods
+     def push(options)
+       ensure_connected!
+
+       message = build_message(options)
+       message.validate!
+
+       connection_pool.with_connection do |connection|
+         subject = subject_for_queue(message.queue)
+
+         # For JetStream persistence
+         if configuration.use_jetstream
+           js = connection.jetstream
+           js.publish(subject, message.to_json)
+         else
+           # For Core NATS
+           connection.publish(subject, message.to_json)
+         end
+       end
+
+       message.job_id
+     end
+
+     def perform_sync(options)
+       ensure_connected!
+
+       message = build_message(options)
+       message.validate!
+
+       result = nil
+       error = nil
+
+       connection_pool.with_connection do |connection|
+         subject = subject_for_queue(message.queue)
+         timeout_seconds = options[:timeout] || configuration.sync_timeout || 5
+
+         # Use NATS request-reply pattern
+         response = connection.connection.request(subject, message.to_json, timeout: timeout_seconds)
+
+         raise TimeoutError, "Job #{message.job_id} timed out after #{timeout_seconds} seconds" unless response
+
+         reply_data = JSON.parse(response.data, symbolize_names: true)
+
+         if reply_data[:success]
+           result = reply_data[:result]
+         else
+           error = reply_data[:error] || 'Job execution failed'
+           raise JobError.new(
+             error,
+             job_class: message.job_class,
+             job_id: message.job_id,
+             original_error: reply_data
+           )
+         end
+
+         result
+       rescue NATS::IO::Timeout
+         raise TimeoutError, "Job #{message.job_id} timed out after #{timeout_seconds} seconds"
+       rescue JSON::ParserError => e
+         raise JobError.new("Failed to parse response: #{e.message}", job_class: message.job_class,
+                            job_id: message.job_id)
+       end
+     end
+
+     def perform_in(delay, options)
+       perform_at(Time.now + delay, options)
+     end
+
+     def perform_at(time, options)
+       ensure_connected!
+
+       message = build_message(options)
+       message.metadata['scheduled_at'] = time.iso8601
+       message.metadata['scheduled_for'] = time.to_i
+       message.validate!
+
+       # Store scheduled job for later processing
+       @mutex.synchronize do
+         @scheduled_jobs << {
+           message: message,
+           scheduled_for: time,
+           status: 'pending'
+         }
+       end
+
+       # Start scheduler thread if not running
+       ensure_scheduler_running
+
+       message.job_id
+     end
+
+     def batch(jobs)
+       ensure_connected!
+
+       batch_id = SecureRandom.uuid
+       job_ids = []
+
+       connection_pool.with_connection do |connection|
+         jobs.each do |job_options|
+           # Merge batch metadata into existing metadata
+           merged_metadata = (job_options[:metadata] || {}).merge(
+             'batch_id' => batch_id,
+             'batch_size' => jobs.size
+           )
+
+           message = build_message(job_options)
+           message.metadata.merge!(merged_metadata)
+           message.validate!
+
+           subject = subject_for_queue(message.queue)
+
+           if configuration.use_jetstream
+             js = connection.jetstream
+             js.publish(subject, message.to_json)
+           else
+             connection.publish(subject, message.to_json)
+           end
+
+           job_ids << message.job_id
+         end
+       end
+
+       { batch_id: batch_id, job_ids: job_ids }
+     end
+
+     # Job status and management
+     def job_status(job_id)
+       @mutex.synchronize do
+         @result_store[job_id]
+       end
+     end
+
+     def store_result(job_id, result, ttl = 3600)
+       @mutex.synchronize do
+         @result_store[job_id] = result
+         @result_expiration_times[job_id] = Time.now + ttl
+
+         # Trigger callbacks if any
+         if @result_callbacks[job_id]
+           @result_callbacks[job_id].each { |callback| callback.call(result) }
+           @result_callbacks.delete(job_id)
+         end
+       end
+     end
+
+     def cancel_job(job_id)
+       # Publish cancellation message
+       connection_pool.with_connection do |connection|
+         cancel_subject = "#{configuration.namespace}.control.cancel"
+
+         cancel_message = {
+           type: 'job.cancel',
+           job_id: job_id,
+           timestamp: Time.now.iso8601
+         }
+
+         connection.publish(cancel_subject, cancel_message.to_json)
+       end
+       true
+     end
+
+     def on_result(job_id, &block)
+       @mutex.synchronize do
+         @result_callbacks[job_id] ||= []
+         @result_callbacks[job_id] << block
+       end
+     end
+
+     def shutdown
+       @mutex.synchronize do
+         shutdown_existing_connections
+       end
+     end
+
+     def start_connection
+       ensure_connected!
+     end
+
+     def stop_connection
+       shutdown
+     end
+
+     private
+
+     def ensure_connected!
+       @mutex.synchronize do
+         reset_connection! if @connection_pool.nil?
+       end
+     end
+
+     def shutdown_existing_connections
+       @connection_pool&.shutdown
+       @jetstream_manager = nil
+     end
+
+     def ensure_streams_exist
+       return unless configuration.use_jetstream && @jetstream_manager
+
+       # Create default streams for common queues
+       %w[default high low].each do |queue|
+         @jetstream_manager.create_stream(
+           queue,
+           subjects: ["#{configuration.namespace}.queue.#{queue}"]
+         )
+       end
+     rescue StandardError => e
+       # Log but don't fail if streams can't be created
+       configuration.logger&.warn "Failed to create JetStream streams: #{e.message}"
+     end
+
+     def build_message(options)
+       Message.new(
+         type: Message::TYPE_JOB_DISPATCH,
+         job_id: options[:job_id] || SecureRandom.uuid,
+         job_class: options[:job_class],
+         queue: options[:queue] || 'default',
+         arguments: options[:arguments] || [],
+         retry_count: options[:retry_count] || 0,
+         max_retries: options[:max_retries] || configuration.max_retries,
+         timeout: options[:timeout] || configuration.job_timeout,
+         metadata: options[:metadata] || {},
+         created_at: Time.now.iso8601
+       )
+     end
+
+     def subject_for_queue(queue_name)
+       "#{configuration.namespace}.queue.#{queue_name}"
+     end
+
+     def ensure_scheduler_running
+       return if @scheduler_thread&.alive?
+
+       @scheduler_thread = Thread.new do
+         loop do
+           process_scheduled_jobs
+           sleep 1
+         rescue StandardError => e
+           configuration.logger&.error "Scheduler error: #{e.message}"
+         end
+       end
+     end
+
+     def process_scheduled_jobs
+       now = Time.now
+
+       jobs_to_process = @mutex.synchronize do
+         ready_jobs = @scheduled_jobs.select do |job|
+           job[:status] == 'pending' && job[:scheduled_for] <= now
+         end
+
+         ready_jobs.each { |job| job[:status] = 'processing' }
+         ready_jobs
+       end
+
+       jobs_to_process.each do |scheduled_job|
+         message = scheduled_job[:message]
+         message.metadata.delete('scheduled_at')
+         message.metadata.delete('scheduled_for')
+         message.enqueued_at = Time.now.iso8601
+
+         push(message.to_hash)
+
+         @mutex.synchronize do
+           scheduled_job[:status] = 'completed'
+         end
+       rescue StandardError => e
+         configuration.logger&.error "Failed to enqueue scheduled job: #{e.message}"
+         @mutex.synchronize do
+           scheduled_job[:status] = 'failed'
+         end
+       end
+
+       # Clean up completed/failed jobs
+       @mutex.synchronize do
+         @scheduled_jobs.reject! { |job| %w[completed failed].include?(job[:status]) }
+       end
+     end
+
+     def start_result_expiration_thread
+       @result_expiration_thread = Thread.new do
+         loop do
+           expire_old_results
+           sleep 60 # Check every minute
+         rescue StandardError => e
+           configuration.logger&.error "Result expiration error: #{e.message}"
+         end
+       end
+     end
+
+     def expire_old_results
+       now = Time.now
+       expired_job_ids = []
+
+       @mutex.synchronize do
+         @result_expiration_times.each do |job_id, expiration_time|
+           expired_job_ids << job_id if expiration_time <= now
+         end
+
+         expired_job_ids.each do |job_id|
+           @result_store.delete(job_id)
+           @result_expiration_times.delete(job_id)
+           @result_callbacks.delete(job_id) # Clean up any stale callbacks
+         end
+       end
+
+       configuration.logger&.debug "Expired #{expired_job_ids.size} job results" if expired_job_ids.any?
+     end
+   end
+ end
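
A minimal usage sketch for the Client API above, inferred from this diff rather than from official documentation: the require path, server URL, and ReportJob class are placeholder assumptions, and the worker that consumes the published messages is not part of this file.

    require 'natswork/client' # assumed entry point for the natswork-client gem

    # Configure the singleton; the block yields Configuration.instance and
    # reset_connection! then rebuilds the connection pool.
    NatsWork::Client.instance.configure do |config|
      config.servers       = ['nats://localhost:4222'] # placeholder server
      config.use_jetstream = true
    end

    # Fire-and-forget dispatch; returns the generated job_id.
    job_id = NatsWork::Client.push(
      job_class: 'ReportJob',   # hypothetical job class handled by a worker
      arguments: [42],
      queue: 'default'
    )

    # Request-reply execution; raises TimeoutError if no worker replies in time.
    result = NatsWork::Client.perform_sync(job_class: 'ReportJob', arguments: [42], timeout: 10)

    # Delayed dispatch via the in-process scheduler thread (delay in seconds).
    NatsWork::Client.perform_in(300, job_class: 'ReportJob', arguments: [42])
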
@@ -0,0 +1,58 @@
+ # frozen_string_literal: true
+
+ require 'zlib'
+ require 'base64'
+
+ module NatsWork
+   module Compression
+     COMPRESSION_THRESHOLD = 10_240 # 10KB
+
+     class << self
+       def compress(data)
+         json = data.is_a?(String) ? data : data.to_json
+
+         # Only compress if data is large enough
+         return { compressed: false, data: data } if json.bytesize < COMPRESSION_THRESHOLD
+
+         compressed = Zlib::Deflate.deflate(json, Zlib::BEST_COMPRESSION)
+         encoded = Base64.strict_encode64(compressed)
+
+         # Only use compression if it actually reduces size
+         if encoded.bytesize < json.bytesize * 0.9
+           {
+             compressed: true,
+             data: encoded,
+             original_size: json.bytesize,
+             compressed_size: encoded.bytesize
+           }
+         else
+           { compressed: false, data: data }
+         end
+       rescue StandardError => e
+         # If compression fails, return original data
+         { compressed: false, data: data, error: e.message }
+       end
+
+       def decompress(payload)
+         return payload unless payload.is_a?(Hash) && payload[:compressed]
+
+         decoded = Base64.strict_decode64(payload[:data])
+         json = Zlib::Inflate.inflate(decoded)
+         JSON.parse(json, symbolize_names: true)
+       rescue StandardError => e
+         raise InvalidMessageError, "Failed to decompress data: #{e.message}"
+       end
+
+       def should_compress?(data)
+         json = data.is_a?(String) ? data : data.to_json
+         json.bytesize >= COMPRESSION_THRESHOLD
+       end
+
+       def compression_ratio(original_size, compressed_size)
+         return 0 if original_size.zero?
+
+         ((1 - (compressed_size.to_f / original_size)) * 100).round(2)
+       end
+     end
+   end
+ end
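
The Compression module only deflates payloads of at least 10 KB and keeps the result only when it saves at least 10% of the JSON size. A small round-trip sketch, assuming an arbitrary large, JSON-serializable payload:

    require 'json'

    payload  = { job_class: 'ReportJob', arguments: [Array.new(5_000) { 'x' }] } # placeholder payload
    envelope = NatsWork::Compression.compress(payload)
    # => { compressed: true, data: '<base64 deflate>', original_size: ..., compressed_size: ... }
    #    or { compressed: false, data: payload } for small or incompressible payloads

    if envelope[:compressed]
      ratio = NatsWork::Compression.compression_ratio(envelope[:original_size], envelope[:compressed_size])
      puts "saved #{ratio}% of #{envelope[:original_size]} bytes"
    end

    # decompress inflates and re-parses only when the :compressed flag is set
    # (symbol keys expected); otherwise it returns its argument unchanged.
    restored = NatsWork::Compression.decompress(envelope)
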
@@ -0,0 +1,117 @@
+ # frozen_string_literal: true
+
+ require 'singleton'
+ require 'natswork/errors'
+
+ module NatsWork
+   class ConfigurationError < Error; end
+
+   class Configuration
+     include Singleton
+
+     attr_accessor :servers, :pool_size, :pool_timeout, :max_reconnect_attempts,
+                   :reconnect_time_wait, :request_timeout, :user, :password,
+                   :token, :tls, :jetstream_enabled, :jetstream_prefix,
+                   :use_jetstream, :namespace, :max_retries, :job_timeout,
+                   :sync_timeout, :logger
+
+     def initialize
+       @servers = ['nats://localhost:4222']
+       @pool_size = 5
+       @pool_timeout = 5
+       @max_reconnect_attempts = 10
+       @reconnect_time_wait = 2
+       @request_timeout = 5
+       @user = nil
+       @password = nil
+       @token = nil
+       @tls = nil
+       @jetstream_enabled = true
+       @jetstream_prefix = 'natswork'
+       @use_jetstream = true
+       @namespace = 'natswork'
+       @max_retries = 3
+       @job_timeout = 30
+       @sync_timeout = 30
+       @logger = nil
+     end
+
+     def to_connection_options
+       opts = {
+         servers: servers,
+         max_reconnect_attempts: max_reconnect_attempts,
+         reconnect_time_wait: reconnect_time_wait
+       }
+
+       opts[:user] = user if user
+       opts[:password] = password if password
+       opts[:token] = token if token
+       opts[:tls] = tls if tls
+
+       opts
+     end
+
+     alias connection_options to_connection_options
+
+     def to_pool_options
+       {
+         size: pool_size,
+         timeout: pool_timeout,
+         connection_options: to_connection_options
+       }
+     end
+
+     def validate!
+       raise ConfigurationError, 'At least one server must be configured' if servers.empty?
+
+       servers.each do |server|
+         raise ConfigurationError, "Invalid server URL: #{server}" unless server =~ %r{^nats://}
+       end
+
+       raise ConfigurationError, 'Pool size must be positive' if pool_size <= 0
+       raise ConfigurationError, 'Pool timeout must be positive' if pool_timeout <= 0
+       raise ConfigurationError, 'Max reconnect attempts must be non-negative' if max_reconnect_attempts.negative?
+       raise ConfigurationError, 'Reconnect time wait must be positive' if reconnect_time_wait <= 0
+       raise ConfigurationError, 'Request timeout must be positive' if request_timeout <= 0
+
+       true
+     end
+
+     class << self
+       def from_hash(hash)
+         config = new
+
+         hash.each do |key, value|
+           setter = "#{key}="
+           config.send(setter, value) if config.respond_to?(setter)
+         end
+
+         config
+       end
+
+       def from_env
+         config = new
+
+         config.servers = ENV['NATSWORK_SERVERS'].split(',').map(&:strip) if ENV['NATSWORK_SERVERS']
+
+         config.pool_size = ENV['NATSWORK_POOL_SIZE'].to_i if ENV['NATSWORK_POOL_SIZE']
+         config.pool_timeout = ENV['NATSWORK_POOL_TIMEOUT'].to_i if ENV['NATSWORK_POOL_TIMEOUT']
+         config.max_reconnect_attempts = ENV['NATSWORK_MAX_RECONNECT'].to_i if ENV['NATSWORK_MAX_RECONNECT']
+         config.reconnect_time_wait = ENV['NATSWORK_RECONNECT_WAIT'].to_i if ENV['NATSWORK_RECONNECT_WAIT']
+         config.request_timeout = ENV['NATSWORK_REQUEST_TIMEOUT'].to_i if ENV['NATSWORK_REQUEST_TIMEOUT']
+
+         config.user = ENV['NATSWORK_USER'] if ENV['NATSWORK_USER']
+         config.password = ENV['NATSWORK_PASSWORD'] if ENV['NATSWORK_PASSWORD']
+         config.token = ENV['NATSWORK_TOKEN'] if ENV['NATSWORK_TOKEN']
+
+         if ENV['NATSWORK_JETSTREAM_ENABLED']
+           config.jetstream_enabled = ENV['NATSWORK_JETSTREAM_ENABLED'].downcase == 'true'
+         end
+
+         config.jetstream_prefix = ENV['NATSWORK_JETSTREAM_PREFIX'] if ENV['NATSWORK_JETSTREAM_PREFIX']
+
+         config
+       end
+     end
+   end
+ end
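
Configuration is a Singleton whose defaults can be overridden programmatically (or via the NATSWORK_* environment variables handled by from_env above). A short sketch with placeholder hosts and credentials:

    require 'logger'

    config = NatsWork::Configuration.instance
    config.servers   = ['nats://nats-1:4222', 'nats://nats-2:4222'] # placeholder hosts
    config.user      = 'app'                                        # placeholder credentials
    config.password  = 'secret'
    config.namespace = 'myapp'
    config.logger    = Logger.new($stdout)

    config.validate! # raises NatsWork::ConfigurationError on bad values

    config.to_pool_options
    # => { size: 5, timeout: 5,
    #      connection_options: { servers: [...], max_reconnect_attempts: 10,
    #                            reconnect_time_wait: 2, user: 'app', password: 'secret' } }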