fractor 0.1.6 → 0.1.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (172)
  1. checksums.yaml +4 -4
  2. data/.rubocop_todo.yml +227 -102
  3. data/README.adoc +113 -1940
  4. data/docs/.lycheeignore +16 -0
  5. data/docs/Gemfile +24 -0
  6. data/docs/README.md +157 -0
  7. data/docs/_config.yml +151 -0
  8. data/docs/_features/error-handling.adoc +1192 -0
  9. data/docs/_features/index.adoc +80 -0
  10. data/docs/_features/monitoring.adoc +589 -0
  11. data/docs/_features/signal-handling.adoc +202 -0
  12. data/docs/_features/workflows.adoc +1235 -0
  13. data/docs/_guides/continuous-mode.adoc +736 -0
  14. data/docs/_guides/cookbook.adoc +1133 -0
  15. data/docs/_guides/index.adoc +55 -0
  16. data/docs/_guides/pipeline-mode.adoc +730 -0
  17. data/docs/_guides/troubleshooting.adoc +358 -0
  18. data/docs/_pages/architecture.adoc +1390 -0
  19. data/docs/_pages/core-concepts.adoc +1392 -0
  20. data/docs/_pages/design-principles.adoc +862 -0
  21. data/docs/_pages/getting-started.adoc +290 -0
  22. data/docs/_pages/installation.adoc +143 -0
  23. data/docs/_reference/api.adoc +1080 -0
  24. data/docs/_reference/error-reporting.adoc +670 -0
  25. data/docs/_reference/examples.adoc +181 -0
  26. data/docs/_reference/index.adoc +96 -0
  27. data/docs/_reference/troubleshooting.adoc +862 -0
  28. data/docs/_tutorials/complex-workflows.adoc +1022 -0
  29. data/docs/_tutorials/data-processing-pipeline.adoc +740 -0
  30. data/docs/_tutorials/first-application.adoc +384 -0
  31. data/docs/_tutorials/index.adoc +48 -0
  32. data/docs/_tutorials/long-running-services.adoc +931 -0
  33. data/docs/assets/images/favicon-16.png +0 -0
  34. data/docs/assets/images/favicon-32.png +0 -0
  35. data/docs/assets/images/favicon-48.png +0 -0
  36. data/docs/assets/images/favicon.ico +0 -0
  37. data/docs/assets/images/favicon.png +0 -0
  38. data/docs/assets/images/favicon.svg +45 -0
  39. data/docs/assets/images/fractor-icon.svg +49 -0
  40. data/docs/assets/images/fractor-logo.svg +61 -0
  41. data/docs/index.adoc +131 -0
  42. data/docs/lychee.toml +39 -0
  43. data/examples/api_aggregator/README.adoc +627 -0
  44. data/examples/api_aggregator/api_aggregator.rb +376 -0
  45. data/examples/auto_detection/README.adoc +407 -29
  46. data/examples/continuous_chat_common/message_protocol.rb +1 -1
  47. data/examples/error_reporting.rb +207 -0
  48. data/examples/file_processor/README.adoc +170 -0
  49. data/examples/file_processor/file_processor.rb +615 -0
  50. data/examples/file_processor/sample_files/invalid.csv +1 -0
  51. data/examples/file_processor/sample_files/orders.xml +24 -0
  52. data/examples/file_processor/sample_files/products.json +23 -0
  53. data/examples/file_processor/sample_files/users.csv +6 -0
  54. data/examples/hierarchical_hasher/README.adoc +629 -41
  55. data/examples/image_processor/README.adoc +610 -0
  56. data/examples/image_processor/image_processor.rb +349 -0
  57. data/examples/image_processor/processed_images/sample_10_processed.jpg.json +12 -0
  58. data/examples/image_processor/processed_images/sample_1_processed.jpg.json +12 -0
  59. data/examples/image_processor/processed_images/sample_2_processed.jpg.json +12 -0
  60. data/examples/image_processor/processed_images/sample_3_processed.jpg.json +12 -0
  61. data/examples/image_processor/processed_images/sample_4_processed.jpg.json +12 -0
  62. data/examples/image_processor/processed_images/sample_5_processed.jpg.json +12 -0
  63. data/examples/image_processor/processed_images/sample_6_processed.jpg.json +12 -0
  64. data/examples/image_processor/processed_images/sample_7_processed.jpg.json +12 -0
  65. data/examples/image_processor/processed_images/sample_8_processed.jpg.json +12 -0
  66. data/examples/image_processor/processed_images/sample_9_processed.jpg.json +12 -0
  67. data/examples/image_processor/test_images/sample_1.png +1 -0
  68. data/examples/image_processor/test_images/sample_10.png +1 -0
  69. data/examples/image_processor/test_images/sample_2.png +1 -0
  70. data/examples/image_processor/test_images/sample_3.png +1 -0
  71. data/examples/image_processor/test_images/sample_4.png +1 -0
  72. data/examples/image_processor/test_images/sample_5.png +1 -0
  73. data/examples/image_processor/test_images/sample_6.png +1 -0
  74. data/examples/image_processor/test_images/sample_7.png +1 -0
  75. data/examples/image_processor/test_images/sample_8.png +1 -0
  76. data/examples/image_processor/test_images/sample_9.png +1 -0
  77. data/examples/log_analyzer/README.adoc +662 -0
  78. data/examples/log_analyzer/log_analyzer.rb +579 -0
  79. data/examples/log_analyzer/sample_logs/apache.log +20 -0
  80. data/examples/log_analyzer/sample_logs/json.log +15 -0
  81. data/examples/log_analyzer/sample_logs/nginx.log +15 -0
  82. data/examples/log_analyzer/sample_logs/rails.log +29 -0
  83. data/examples/multi_work_type/README.adoc +576 -26
  84. data/examples/performance_monitoring.rb +120 -0
  85. data/examples/pipeline_processing/README.adoc +740 -26
  86. data/examples/pipeline_processing/pipeline_processing.rb +2 -2
  87. data/examples/priority_work_example.rb +155 -0
  88. data/examples/producer_subscriber/README.adoc +889 -46
  89. data/examples/scatter_gather/README.adoc +829 -27
  90. data/examples/simple/README.adoc +347 -0
  91. data/examples/specialized_workers/README.adoc +622 -26
  92. data/examples/specialized_workers/specialized_workers.rb +44 -8
  93. data/examples/stream_processor/README.adoc +206 -0
  94. data/examples/stream_processor/stream_processor.rb +284 -0
  95. data/examples/web_scraper/README.adoc +625 -0
  96. data/examples/web_scraper/web_scraper.rb +285 -0
  97. data/examples/workflow/README.adoc +406 -0
  98. data/examples/workflow/circuit_breaker/README.adoc +360 -0
  99. data/examples/workflow/circuit_breaker/circuit_breaker_workflow.rb +225 -0
  100. data/examples/workflow/conditional/README.adoc +483 -0
  101. data/examples/workflow/conditional/conditional_workflow.rb +215 -0
  102. data/examples/workflow/dead_letter_queue/README.adoc +374 -0
  103. data/examples/workflow/dead_letter_queue/dead_letter_queue_workflow.rb +217 -0
  104. data/examples/workflow/fan_out/README.adoc +381 -0
  105. data/examples/workflow/fan_out/fan_out_workflow.rb +202 -0
  106. data/examples/workflow/retry/README.adoc +248 -0
  107. data/examples/workflow/retry/retry_workflow.rb +195 -0
  108. data/examples/workflow/simple_linear/README.adoc +267 -0
  109. data/examples/workflow/simple_linear/simple_linear_workflow.rb +175 -0
  110. data/examples/workflow/simplified/README.adoc +329 -0
  111. data/examples/workflow/simplified/simplified_workflow.rb +222 -0
  112. data/exe/fractor +10 -0
  113. data/lib/fractor/cli.rb +288 -0
  114. data/lib/fractor/configuration.rb +307 -0
  115. data/lib/fractor/continuous_server.rb +60 -65
  116. data/lib/fractor/error_formatter.rb +72 -0
  117. data/lib/fractor/error_report_generator.rb +152 -0
  118. data/lib/fractor/error_reporter.rb +244 -0
  119. data/lib/fractor/error_statistics.rb +147 -0
  120. data/lib/fractor/execution_tracer.rb +162 -0
  121. data/lib/fractor/logger.rb +230 -0
  122. data/lib/fractor/main_loop_handler.rb +406 -0
  123. data/lib/fractor/main_loop_handler3.rb +135 -0
  124. data/lib/fractor/main_loop_handler4.rb +299 -0
  125. data/lib/fractor/performance_metrics_collector.rb +181 -0
  126. data/lib/fractor/performance_monitor.rb +215 -0
  127. data/lib/fractor/performance_report_generator.rb +202 -0
  128. data/lib/fractor/priority_work.rb +93 -0
  129. data/lib/fractor/priority_work_queue.rb +189 -0
  130. data/lib/fractor/result_aggregator.rb +32 -0
  131. data/lib/fractor/shutdown_handler.rb +168 -0
  132. data/lib/fractor/signal_handler.rb +80 -0
  133. data/lib/fractor/supervisor.rb +382 -269
  134. data/lib/fractor/supervisor_logger.rb +88 -0
  135. data/lib/fractor/version.rb +1 -1
  136. data/lib/fractor/work.rb +12 -0
  137. data/lib/fractor/work_distribution_manager.rb +151 -0
  138. data/lib/fractor/work_queue.rb +20 -0
  139. data/lib/fractor/work_result.rb +181 -9
  140. data/lib/fractor/worker.rb +73 -0
  141. data/lib/fractor/workflow/builder.rb +210 -0
  142. data/lib/fractor/workflow/chain_builder.rb +169 -0
  143. data/lib/fractor/workflow/circuit_breaker.rb +183 -0
  144. data/lib/fractor/workflow/circuit_breaker_orchestrator.rb +208 -0
  145. data/lib/fractor/workflow/circuit_breaker_registry.rb +112 -0
  146. data/lib/fractor/workflow/dead_letter_queue.rb +334 -0
  147. data/lib/fractor/workflow/execution_hooks.rb +39 -0
  148. data/lib/fractor/workflow/execution_strategy.rb +225 -0
  149. data/lib/fractor/workflow/execution_trace.rb +134 -0
  150. data/lib/fractor/workflow/helpers.rb +191 -0
  151. data/lib/fractor/workflow/job.rb +290 -0
  152. data/lib/fractor/workflow/job_dependency_validator.rb +120 -0
  153. data/lib/fractor/workflow/logger.rb +110 -0
  154. data/lib/fractor/workflow/pre_execution_context.rb +193 -0
  155. data/lib/fractor/workflow/retry_config.rb +156 -0
  156. data/lib/fractor/workflow/retry_orchestrator.rb +184 -0
  157. data/lib/fractor/workflow/retry_strategy.rb +93 -0
  158. data/lib/fractor/workflow/structured_logger.rb +30 -0
  159. data/lib/fractor/workflow/type_compatibility_validator.rb +222 -0
  160. data/lib/fractor/workflow/visualizer.rb +211 -0
  161. data/lib/fractor/workflow/workflow_context.rb +132 -0
  162. data/lib/fractor/workflow/workflow_executor.rb +669 -0
  163. data/lib/fractor/workflow/workflow_result.rb +55 -0
  164. data/lib/fractor/workflow/workflow_validator.rb +295 -0
  165. data/lib/fractor/workflow.rb +333 -0
  166. data/lib/fractor/wrapped_ractor.rb +66 -101
  167. data/lib/fractor/wrapped_ractor3.rb +161 -0
  168. data/lib/fractor/wrapped_ractor4.rb +242 -0
  169. data/lib/fractor.rb +92 -4
  170. metadata +179 -6
  171. data/tests/sample.rb.bak +0 -309
  172. data/tests/sample_working.rb.bak +0 -209
@@ -0,0 +1,931 @@
1
+ ---
2
+ layout: default
3
+ title: Creating Long-Running Services
4
+ nav_order: 6
5
+ ---
6
+
7
+ == Creating Long-Running Services
8
+
9
+ === Overview
10
+
11
+ In this 30-minute intermediate tutorial, you'll build a production-ready background job processor that runs continuously, processing jobs as they arrive. This demonstrates how to use Fractor's continuous mode for long-running services.
12
+
13
+ **What you'll learn:**
14
+
15
+ * Using continuous mode for indefinite operation
16
+ * Implementing WorkQueue for dynamic work submission
17
+ * Using ContinuousServer for simplified server management
18
+ * Handling graceful shutdown and signals
19
+ * Monitoring server health and performance
20
+ * Production deployment patterns
21
+
22
+ **Prerequisites:**
23
+
24
+ * Completed the link:../../getting-started[Getting Started] tutorial
25
+ * Basic understanding of link:../../guides/core-concepts[Core Concepts]
26
+ * Familiarity with link:../../guides/continuous-mode[Continuous Mode]
27
+
28
+ === The Problem
29
+
30
+ You need to build a background job processor that:
31
+
32
+ 1. Runs continuously as a daemon/service
33
+ 2. Accepts job submissions from multiple sources
34
+ 3. Processes jobs in parallel
35
+ 4. Handles errors gracefully
36
+ 5. Provides health monitoring
37
+ 6. Supports graceful shutdown
38
+ 7. Is production-ready with logging and metrics
39
+
40
+ === Step 1: Set Up the Project
41
+
42
+ Create the project structure:
43
+
44
+ [source,sh]
45
+ ----
46
+ mkdir job_processor
47
+ cd job_processor
48
+ mkdir -p lib logs
49
+ touch lib/job_processor.rb
50
+ touch lib/jobs.rb
51
+ touch lib/workers.rb
52
+ ----
53
+
54
+ Install Fractor:
55
+
56
+ [source,sh]
57
+ ----
58
+ gem install fractor
59
+ ----
60
+
61
+ === Step 2: Define Job Types
62
+
63
+ Create `lib/jobs.rb` with different job types:
64
+
65
+ [source,ruby]
66
+ ----
67
+ require 'fractor'
68
+ require 'json'
69
+ require 'net/http'
70
+
71
+ # Base job class
72
+ class Job < Fractor::Work
73
+ attr_reader :job_id, :job_type, :created_at
74
+
75
+ def initialize(job_id:, job_type:, params: {})
76
+ @job_id = job_id
77
+ @job_type = job_type
78
+ @created_at = Time.now
79
+ super(job_id: job_id, job_type: job_type, params: params)
80
+ end
81
+
82
+ def params
83
+ input[:params]
84
+ end
85
+
86
+ def to_s
87
+ "Job #{job_id} (#{job_type})"
88
+ end
89
+ end
90
+
91
+ # Email sending job
92
+ class EmailJob < Job
93
+ def initialize(job_id:, to:, subject:, body:)
94
+ super(
95
+ job_id: job_id,
96
+ job_type: :email,
97
+ params: { to: to, subject: subject, body: body }
98
+ )
99
+ end
100
+
101
+ def to
102
+ params[:to]
103
+ end
104
+
105
+ def subject
106
+ params[:subject]
107
+ end
108
+
109
+ def body
110
+ params[:body]
111
+ end
112
+ end
113
+
114
+ # Report generation job
115
+ class ReportJob < Job
116
+ def initialize(job_id:, report_type:, date_range:)
117
+ super(
118
+ job_id: job_id,
119
+ job_type: :report,
120
+ params: { report_type: report_type, date_range: date_range }
121
+ )
122
+ end
123
+
124
+ def report_type
125
+ params[:report_type]
126
+ end
127
+
128
+ def date_range
129
+ params[:date_range]
130
+ end
131
+ end
132
+
133
+ # Data export job
134
+ class ExportJob < Job
135
+ def initialize(job_id:, format:, filters:)
136
+ super(
137
+ job_id: job_id,
138
+ job_type: :export,
139
+ params: { format: format, filters: filters }
140
+ )
141
+ end
142
+
143
+ def format
144
+ params[:format]
145
+ end
146
+
147
+ def filters
148
+ params[:filters]
149
+ end
150
+ end
151
+
152
+ # Webhook notification job
153
+ class WebhookJob < Job
154
+ def initialize(job_id:, url:, payload:)
155
+ super(
156
+ job_id: job_id,
157
+ job_type: :webhook,
158
+ params: { url: url, payload: payload }
159
+ )
160
+ end
161
+
162
+ def url
163
+ params[:url]
164
+ end
165
+
166
+ def payload
167
+ params[:payload]
168
+ end
169
+ end
170
+ ----
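Before moving on, you can sanity-check these classes from the project root in an IRB session. This sketch only touches the accessors defined above; note that `EmailJob#to` relies on `Fractor::Work` exposing the constructor arguments through `input`, as the `params` method assumes.

[source,ruby]
----
require_relative 'lib/jobs'

# Build a job and read it back through the accessors defined above
email = EmailJob.new(
  job_id: 1,
  to: 'user@example.com',
  subject: 'Welcome',
  body: 'Hello from the job processor'
)

puts email            # => Job 1 (email)
puts email.job_type   # => email
puts email.to         # => user@example.com (via params -> input[:params])
----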
171
+
172
+ === Step 3: Create Specialized Workers
173
+
174
+ Create `lib/workers.rb` with workers for each job type:
175
+
176
+ [source,ruby]
177
+ ----
178
+ require 'fractor'
179
+ require 'net/http'
180
+ require 'json'
181
+ require_relative 'jobs'
182
+
183
+ # Worker for email jobs
184
+ class EmailWorker < Fractor::Worker
185
+ def process(work)
186
+ job = work
187
+
188
+ puts " 📧 Sending email to #{job.to}"
189
+ puts " Subject: #{job.subject}"
190
+
191
+ # Simulate email sending
192
+ sleep(0.5 + rand * 0.5)
193
+
194
+ # Simulate occasional failures
195
+ if rand < 0.1
196
+ raise "SMTP connection failed"
197
+ end
198
+
199
+ Fractor::WorkResult.new(
200
+ result: {
201
+ job_id: job.job_id,
202
+ status: 'sent',
203
+ recipient: job.to,
204
+ sent_at: Time.now
205
+ },
206
+ work: work
207
+ )
208
+ rescue => e
209
+ Fractor::WorkResult.new(
210
+ error: e,
211
+ error_code: :email_failed,
212
+ error_context: {
213
+ job_id: job.job_id,
214
+ recipient: job.to
215
+ },
216
+ work: work
217
+ )
218
+ end
219
+ end
220
+
221
+ # Worker for report generation jobs
222
+ class ReportWorker < Fractor::Worker
223
+ def process(work)
224
+ job = work
225
+
226
+ puts " 📊 Generating #{job.report_type} report"
227
+ puts " Date range: #{job.date_range}"
228
+
229
+ # Simulate report generation
230
+ sleep(1.0 + rand * 2.0)
231
+
232
+ report_file = "reports/#{job.job_id}_#{job.report_type}.pdf"
233
+
234
+ Fractor::WorkResult.new(
235
+ result: {
236
+ job_id: job.job_id,
237
+ status: 'completed',
238
+ report_file: report_file,
239
+ generated_at: Time.now
240
+ },
241
+ work: work
242
+ )
243
+ rescue => e
244
+ Fractor::WorkResult.new(
245
+ error: e,
246
+ error_code: :report_failed,
247
+ error_context: {
248
+ job_id: job.job_id,
249
+ report_type: job.report_type
250
+ },
251
+ work: work
252
+ )
253
+ end
254
+ end
255
+
256
+ # Worker for data export jobs
257
+ class ExportWorker < Fractor::Worker
258
+ def process(work)
259
+ job = work
260
+
261
+ puts " 💾 Exporting data to #{job.format}"
262
+ puts " Filters: #{job.filters.inspect}"
263
+
264
+ # Simulate export
265
+ sleep(0.8 + rand * 1.2)
266
+
267
+ export_file = "exports/#{job.job_id}.#{job.format}"
268
+
269
+ Fractor::WorkResult.new(
270
+ result: {
271
+ job_id: job.job_id,
272
+ status: 'exported',
273
+ export_file: export_file,
274
+ exported_at: Time.now
275
+ },
276
+ work: work
277
+ )
278
+ rescue => e
279
+ Fractor::WorkResult.new(
280
+ error: e,
281
+ error_code: :export_failed,
282
+ error_context: {
283
+ job_id: job.job_id,
284
+ format: job.format
285
+ },
286
+ work: work
287
+ )
288
+ end
289
+ end
290
+
291
+ # Worker for webhook notifications
292
+ class WebhookWorker < Fractor::Worker
293
+ def process(work)
294
+ job = work
295
+
296
+ puts " 🔔 Sending webhook to #{job.url}"
297
+
298
+ # Simulate webhook call
299
+ sleep(0.3 + rand * 0.3)
300
+
301
+ # In production: send actual HTTP request
302
+ # uri = URI(job.url)
303
+ # response = Net::HTTP.post(uri, job.payload.to_json, 'Content-Type' => 'application/json')
304
+
305
+ Fractor::WorkResult.new(
306
+ result: {
307
+ job_id: job.job_id,
308
+ status: 'delivered',
309
+ url: job.url,
310
+ delivered_at: Time.now
311
+ },
312
+ work: work
313
+ )
314
+ rescue => e
315
+ Fractor::WorkResult.new(
316
+ error: e,
317
+ error_code: :webhook_failed,
318
+ error_context: {
319
+ job_id: job.job_id,
320
+ url: job.url
321
+ },
322
+ work: work
323
+ )
324
+ end
325
+ end
326
+ ----
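You can also exercise a worker in isolation before wiring it into a server, which is handy for checking both the success path and the simulated SMTP failure. A minimal sketch, assuming a `Fractor::Worker` subclass can be instantiated and its `process` method called directly, and that `WorkResult` exposes `result`, `error`, `error_code` and `error_context` as used later in this tutorial:

[source,ruby]
----
require_relative 'lib/workers'

job = EmailJob.new(
  job_id: 42,
  to: 'smoke-test@example.com',
  subject: 'Smoke test',
  body: 'Checking EmailWorker outside the server'
)

result = EmailWorker.new.process(job)

if result.error
  # Roughly 10% of runs hit the simulated SMTP failure above
  puts "failed: #{result.error_code} - #{result.error.message}"
else
  puts "ok: #{result.result[:status]} to #{result.result[:recipient]}"
end
----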
327
+
328
+ === Step 4: Build the Job Processor Service
329
+
330
+ Create `lib/job_processor.rb`:
331
+
332
+ [source,ruby]
333
+ ----
334
+ require 'fractor'
335
+ require_relative 'workers'
336
+ require_relative 'jobs'
337
+
338
+ class JobProcessorService
339
+ attr_reader :work_queue, :server, :stats
340
+
341
+ def initialize
342
+ @work_queue = Fractor::WorkQueue.new
343
+ @stats = {
344
+ jobs_submitted: 0,
345
+ jobs_completed: 0,
346
+ jobs_failed: 0,
347
+ started_at: Time.now
348
+ }
349
+
350
+ setup_server
351
+ end
352
+
353
+ def start
354
+ puts "=" * 60
355
+ puts "Job Processor Service Starting"
356
+ puts "=" * 60
357
+ puts "Started at: #{@stats[:started_at]}"
358
+ puts "Workers:"
359
+ puts " - Email workers: 2"
360
+ puts " - Report workers: 2"
361
+ puts " - Export workers: 2"
362
+ puts " - Webhook workers: 4"
363
+ puts ""
364
+ puts "Press Ctrl+C to gracefully shutdown"
365
+ puts "=" * 60
366
+ puts ""
367
+
368
+ # Start the server (blocks until shutdown)
369
+ @server.run
370
+ end
371
+
372
+ def submit_job(job)
373
+ @work_queue << job
374
+ @stats[:jobs_submitted] += 1
375
+ puts "✓ Submitted #{job}"
376
+ end
377
+
378
+ def stop
379
+ puts "\nInitiating graceful shutdown..."
380
+ @server.stop
381
+ print_stats
+ end
382
+
383
+ private
384
+
385
+ def setup_server
386
+ @server = Fractor::ContinuousServer.new(
387
+ worker_pools: [
388
+ { worker_class: EmailWorker, num_workers: 2 },
389
+ { worker_class: ReportWorker, num_workers: 2 },
390
+ { worker_class: ExportWorker, num_workers: 2 },
391
+ { worker_class: WebhookWorker, num_workers: 4 }
392
+ ],
393
+ work_queue: @work_queue,
394
+ log_file: 'logs/job_processor.log'
395
+ )
396
+
397
+ # Handle successful job completions
398
+ @server.on_result do |result|
399
+ @stats[:jobs_completed] += 1
400
+ job_id = result.result[:job_id]
401
+ status = result.result[:status]
402
+
403
+ puts " ✓ Job #{job_id} completed: #{status}"
404
+
405
+ # Send notification, update database, etc.
406
+ notify_job_completion(result)
407
+ end
408
+
409
+ # Handle job failures
410
+ @server.on_error do |error_result|
411
+ @stats[:jobs_failed] += 1
412
+ job_id = error_result.error_context[:job_id]
413
+ error_code = error_result.error_code
414
+
415
+ puts " ✗ Job #{job_id} failed: #{error_code}"
416
+ puts " Error: #{error_result.error.message}"
417
+
418
+ # Log error, send alert, retry logic, etc.
419
+ handle_job_failure(error_result)
420
+ end
421
+ end
422
+
423
+ def notify_job_completion(result)
424
+ # In production: send notification, update database, etc.
425
+ # Database.update_job_status(result.result[:job_id], 'completed')
426
+ # NotificationService.send(result.result)
427
+ end
428
+
429
+ def handle_job_failure(error_result)
430
+ # In production: log to error tracking service, implement retry logic
431
+ # ErrorTracker.notify(error_result)
432
+ # RetryQueue.add(error_result.work) if error_result.retriable?
433
+ end
434
+
435
+ def print_stats
436
+ uptime = Time.now - @stats[:started_at]
437
+
438
+ puts "\n" + "=" * 60
439
+ puts "Service Statistics"
440
+ puts "=" * 60
441
+ puts "Uptime: #{format_duration(uptime)}"
442
+ puts "Jobs submitted: #{@stats[:jobs_submitted]}"
443
+ puts "Jobs completed: #{@stats[:jobs_completed]}"
444
+ puts "Jobs failed: #{@stats[:jobs_failed]}"
445
+ puts "Queue depth: #{@work_queue.size}"
446
+ puts "Success rate: #{success_rate}%"
447
+ puts "=" * 60
448
+ end
449
+
450
+ def format_duration(seconds)
451
+ hours = (seconds / 3600).to_i
452
+ minutes = ((seconds % 3600) / 60).to_i
453
+ secs = (seconds % 60).to_i
454
+ "#{hours}h #{minutes}m #{secs}s"
455
+ end
456
+
457
+ def success_rate
458
+ total = @stats[:jobs_completed] + @stats[:jobs_failed]
459
+ return 0 if total == 0
460
+ ((@stats[:jobs_completed].to_f / total) * 100).round(2)
461
+ end
462
+ end
463
+ ----
464
+
465
+ === Step 5: Create the Service Runner
466
+
467
+ Create `run_service.rb`:
468
+
469
+ [source,ruby]
470
+ ----
471
+ require 'date' # Date.today is used when building ReportJob below
+ require_relative 'lib/job_processor'
472
+
473
+ # Create the service
474
+ service = JobProcessorService.new
475
+
476
+ # Handle shutdown signal
477
+ trap('INT') do
478
+ service.stop
479
+ end
480
+
481
+ # Start the service in a background thread
482
+ service_thread = Thread.new do
483
+ service.start
484
+ end
485
+
486
+ # Simulate job submissions (in production, this would be an API, queue, etc.)
487
+ job_simulator = Thread.new do
488
+ sleep 2 # Wait for service to start
489
+
490
+ job_id = 1
491
+
492
+ loop do
493
+ sleep(1 + rand * 2) # Random interval between jobs
494
+
495
+ # Randomly submit different job types
496
+ case rand(4)
497
+ when 0
498
+ job = EmailJob.new(
499
+ job_id: job_id,
500
+ to: "user#{job_id}@example.com",
501
+ subject: "Test Email #{job_id}",
502
+ body: "This is a test email"
503
+ )
504
+ when 1
505
+ job = ReportJob.new(
506
+ job_id: job_id,
507
+ report_type: %w[sales inventory users].sample,
508
+ date_range: "#{Date.today - 7} to #{Date.today}"
509
+ )
510
+ when 2
511
+ job = ExportJob.new(
512
+ job_id: job_id,
513
+ format: %w[csv json xml].sample,
514
+ filters: { status: 'active' }
515
+ )
516
+ else
517
+ job = WebhookJob.new(
518
+ job_id: job_id,
519
+ url: "https://webhook.site/test",
520
+ payload: { event: 'test', id: job_id }
521
+ )
522
+ end
523
+
524
+ service.submit_job(job)
525
+ job_id += 1
526
+
527
+ # Simulate burst traffic occasionally
528
+ if rand < 0.2
529
+ 5.times do
530
+ burst_job = EmailJob.new(
531
+ job_id: job_id,
532
+ to: "burst#{job_id}@example.com",
533
+ subject: "Burst Email",
534
+ body: "Burst traffic test"
535
+ )
536
+ service.submit_job(burst_job)
537
+ job_id += 1
538
+ end
539
+ end
540
+ end
541
+ end
542
+
543
+ # Wait for service thread
544
+ service_thread.join
545
+
546
+ # Cleanup
547
+ job_simulator.kill
548
+ puts "\nService stopped."
549
+ ----
550
+
551
+ === Step 6: Add Health Monitoring
552
+
553
+ Create `lib/health_monitor.rb`:
554
+
555
+ [source,ruby]
556
+ ----
557
+ require 'fractor'
558
+
559
+ class HealthMonitor
560
+ def initialize(service)
561
+ @service = service
562
+ @monitor = nil
563
+ end
564
+
565
+ def start
566
+ return if @monitor
567
+
568
+ @monitor = Fractor::PerformanceMonitor.new(
569
+ @service.server.supervisor,
570
+ sample_interval: 5.0
571
+ )
572
+
573
+ @monitor.start
574
+
575
+ # Start monitoring thread
576
+ @monitor_thread = Thread.new do
577
+ loop do
578
+ sleep 30 # Report every 30 seconds
579
+
580
+ print_health_report
581
+ end
582
+ end
583
+ end
584
+
585
+ def stop
586
+ @monitor&.stop
587
+ @monitor_thread&.kill
588
+ end
589
+
590
+ def print_health_report
591
+ snapshot = @monitor.snapshot
592
+
593
+ puts "\n" + "=" * 60
594
+ puts "Health Check"
595
+ puts "=" * 60
596
+ puts "Time: #{Time.now}"
597
+ puts "Jobs processed: #{snapshot[:jobs_processed]}"
598
+ puts "Jobs succeeded: #{snapshot[:jobs_succeeded]}"
599
+ puts "Jobs failed: #{snapshot[:jobs_failed]}"
600
+ puts "Throughput: #{snapshot[:throughput].round(2)} jobs/sec"
601
+ puts "Queue depth: #{snapshot[:queue_depth]}"
602
+ puts "Worker count: #{snapshot[:worker_count]}"
603
+ puts "Active workers: #{snapshot[:active_workers]}"
604
+ puts "Utilization: #{snapshot[:worker_utilization].round(2)}%"
605
+ puts "Memory: #{snapshot[:memory_mb].round(2)} MB"
606
+ puts "=" * 60
607
+ end
608
+
609
+ def export_metrics
610
+ @monitor.to_prometheus
611
+ end
612
+ end
613
+ ----
614
+
615
+ === Step 7: Add HTTP Health Endpoint
616
+
617
+ Create `lib/http_server.rb` for health checks:
618
+
619
+ [source,ruby]
620
+ ----
621
+ require 'webrick'
622
+ require 'json'
+ require 'time' # Time#iso8601 comes from the time standard library
623
+
624
+ class HealthEndpoint
625
+ def initialize(service, monitor, port: 9090)
626
+ @service = service
627
+ @monitor = monitor
628
+ @port = port
629
+ end
630
+
631
+ def start
632
+ @server = WEBrick::HTTPServer.new(Port: @port)
633
+
634
+ # Health check endpoint
635
+ @server.mount_proc '/health' do |req, res|
636
+ res['Content-Type'] = 'application/json'
637
+ res.body = health_status.to_json
638
+ end
639
+
640
+ # Metrics endpoint (Prometheus format)
641
+ @server.mount_proc '/metrics' do |req, res|
642
+ res['Content-Type'] = 'text/plain; version=0.0.4'
643
+ res.body = @monitor.export_metrics
644
+ end
645
+
646
+ # Stats endpoint
647
+ @server.mount_proc '/stats' do |req, res|
648
+ res['Content-Type'] = 'application/json'
649
+ res.body = @service.stats.to_json
650
+ end
651
+
652
+ Thread.new { @server.start }
653
+
654
+ puts "Health endpoint: http://localhost:#{@port}/health"
655
+ puts "Metrics endpoint: http://localhost:#{@port}/metrics"
656
+ puts "Stats endpoint: http://localhost:#{@port}/stats"
657
+ end
658
+
659
+ def stop
660
+ @server&.shutdown
661
+ end
662
+
663
+ private
664
+
665
+ def health_status
666
+ {
667
+ status: 'healthy',
668
+ timestamp: Time.now.iso8601,
669
+ uptime: Time.now - @service.stats[:started_at],
670
+ queue_depth: @service.work_queue.size,
671
+ stats: @service.stats
672
+ }
673
+ end
674
+ end
675
+ ----
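Step 8's expected output includes the health and metrics endpoints, so `run_service.rb` needs to start them alongside the service. A minimal sketch of that extra wiring, using only the classes from Steps 4, 6 and 7 and assuming the server exposes its supervisor before `run` is called (as `HealthMonitor#start` expects):

[source,ruby]
----
require_relative 'lib/job_processor'
require_relative 'lib/health_monitor'
require_relative 'lib/http_server'

service  = JobProcessorService.new
monitor  = HealthMonitor.new(service)
endpoint = HealthEndpoint.new(service, monitor, port: 9090)

trap('INT') do
  endpoint.stop
  monitor.stop
  service.stop
end

# Bring up monitoring and the HTTP endpoints, then block in the server loop
monitor.start
endpoint.start
service.start
----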
676
+
677
+ === Step 8: Run the Service
678
+
679
+ Run the service:
680
+
681
+ [source,sh]
682
+ ----
683
+ ruby run_service.rb
684
+ ----
685
+
686
+ Expected output:
687
+
688
+ [source]
689
+ ----
690
+ ============================================================
691
+ Job Processor Service Starting
692
+ ============================================================
693
+ Started at: 2024-01-15 10:30:00 +0000
694
+ Workers:
695
+ - Email workers: 2
696
+ - Report workers: 2
697
+ - Export workers: 2
698
+ - Webhook workers: 4
699
+
700
+ Press Ctrl+C to shut down gracefully
701
+ ============================================================
702
+
703
+ Health endpoint: http://localhost:9090/health
704
+ Metrics endpoint: http://localhost:9090/metrics
705
+ Stats endpoint: http://localhost:9090/stats
706
+
707
+ ✓ Submitted Job 1 (email)
708
+ 📧 Sending email to user1@example.com
709
+ Subject: Test Email 1
710
+ ✓ Job 1 completed: sent
711
+
712
+ ✓ Submitted Job 2 (report)
713
+ 📊 Generating sales report
714
+ Date range: 2024-01-08 to 2024-01-15
715
+ ✓ Job 2 completed: completed
716
+
717
+ ... (continues running)
718
+ ----
719
+
720
+ In another terminal, check health:
721
+
722
+ [source,sh]
723
+ ----
724
+ curl http://localhost:9090/health
725
+ ----
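The response follows the `health_status` hash from Step 7; the values here are illustrative:

[source,json]
----
{
  "status": "healthy",
  "timestamp": "2024-01-15T10:35:00+00:00",
  "uptime": 300.42,
  "queue_depth": 3,
  "stats": {
    "jobs_submitted": 42,
    "jobs_completed": 38,
    "jobs_failed": 1,
    "started_at": "2024-01-15 10:30:00 +0000"
  }
}
----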
726
+
727
+ === Step 9: Production Deployment
728
+
729
+ ==== Systemd Service File
730
+
731
+ Create `/etc/systemd/system/job-processor.service`:
732
+
733
+ [source,ini]
734
+ ----
735
+ [Unit]
736
+ Description=Job Processor Service
737
+ After=network.target
738
+
739
+ [Service]
740
+ Type=simple
741
+ User=appuser
742
+ WorkingDirectory=/opt/job_processor
743
+ ExecStart=/usr/bin/ruby /opt/job_processor/run_service.rb
744
+ Restart=always
745
+ RestartSec=10
746
+ StandardOutput=append:/var/log/job_processor/output.log
747
+ StandardError=append:/var/log/job_processor/error.log
748
+
749
+ # Graceful shutdown
750
+ KillMode=process
751
+ KillSignal=SIGTERM
752
+ TimeoutStopSec=30
753
+
754
+ [Install]
755
+ WantedBy=multi-user.target
756
+ ----
757
+
758
+ Start the service:
759
+
760
+ [source,sh]
761
+ ----
762
+ sudo systemctl daemon-reload
763
+ sudo systemctl start job-processor
764
+ sudo systemctl enable job-processor
765
+ sudo systemctl status job-processor
766
+ ----
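To follow the logs once the unit is running:

[source,sh]
----
# Application output (matches the StandardOutput/StandardError paths above)
tail -f /var/log/job_processor/output.log /var/log/job_processor/error.log

# Unit-level messages (starts, restarts, failures)
sudo journalctl -u job-processor -f
----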
767
+
768
+ ==== Docker Deployment
769
+
770
+ Create `Dockerfile`:
771
+
772
+ [source,dockerfile]
773
+ ----
774
+ FROM ruby:3.2-slim
775
+
776
+ WORKDIR /app
777
+
778
+ # Install dependencies
779
+ COPY Gemfile* ./
780
+ RUN bundle install
781
+
782
+ # Copy application
783
+ COPY . .
784
+
785
+ # Create logs directory
786
+ RUN mkdir -p logs
787
+
788
+ # Expose health check port
789
+ EXPOSE 9090
790
+
791
+ # Run service
792
+ CMD ["ruby", "run_service.rb"]
793
+ ----
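The Dockerfile copies a `Gemfile` and runs `bundle install`, but Step 1 installed Fractor with `gem install` and never created one. A minimal sketch; the `webrick` entry is an assumption, needed because `lib/http_server.rb` uses WEBrick, which no longer ships with Ruby 3.x:

[source,ruby]
----
# Gemfile
source 'https://rubygems.org'

gem 'fractor'
gem 'webrick' # used by lib/http_server.rb; not bundled with Ruby 3.x
----

If you keep the `curl`-based healthcheck in the compose file below, the slim base image typically needs `curl` installed in the Dockerfile as well.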
794
+
795
+ Create `docker-compose.yml`:
796
+
797
+ [source,yaml]
798
+ ----
799
+ version: '3.8'
800
+
801
+ services:
802
+ job-processor:
803
+ build: .
804
+ ports:
805
+ - "9090:9090"
806
+ volumes:
807
+ - ./logs:/app/logs
808
+ restart: unless-stopped
809
+ environment:
810
+ - FRACTOR_LOG_FILE=/app/logs/job_processor.log
811
+ healthcheck:
812
+ test: ["CMD", "curl", "-f", "http://localhost:9090/health"]
813
+ interval: 30s
814
+ timeout: 10s
815
+ retries: 3
816
+ ----
817
+
818
+ Run with Docker:
819
+
820
+ [source,sh]
821
+ ----
822
+ docker-compose up -d
823
+ docker-compose logs -f
824
+ ----
825
+
826
+ === Best Practices Demonstrated
827
+
828
+ ==== 1. Graceful Shutdown
829
+
830
+ ContinuousServer handles signals automatically:
831
+
832
+ * **SIGTERM**: Graceful shutdown (completes in-progress work)
833
+ * **SIGINT**: Interactive shutdown (Ctrl+C)
834
+ * **SIGUSR1**: Status output (health check)
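For example, once the service is running you can exercise these handlers from another terminal (the `pgrep` pattern assumes the service was started with `ruby run_service.rb`):

[source,sh]
----
# Ask the running service to print its status
kill -USR1 "$(pgrep -f run_service.rb)"

# Request a graceful shutdown; in-progress jobs are completed first
kill -TERM "$(pgrep -f run_service.rb)"
----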
835
+
836
+ ==== 2. Health Monitoring
837
+
838
+ Multiple monitoring approaches:
839
+
840
+ * Built-in PerformanceMonitor
841
+ * HTTP health endpoints
842
+ * Prometheus metrics export
843
+
844
+ ==== 3. Error Handling
845
+
846
+ Comprehensive error handling with callbacks:
847
+
848
+ [source,ruby]
849
+ ----
850
+ server.on_error do |error_result|
851
+ # Log, alert, retry, or add to DLQ
852
+ end
853
+ ----
854
+
855
+ ==== 4. Thread Safety
856
+
857
+ WorkQueue is thread-safe for concurrent job submission:
858
+
859
+ [source,ruby]
860
+ ----
861
+ # Safe from multiple threads
862
+ jobs.map do |job|
864
+   Thread.new { work_queue << job }
865
+ end.each(&:join)
865
+ ----
866
+
867
+ === Production Considerations
868
+
869
+ ==== 1. Logging
870
+
871
+ Configure structured logging:
872
+
873
+ [source,ruby]
874
+ ----
875
+ server = Fractor::ContinuousServer.new(
876
+ # ...
877
+ log_file: '/var/log/job_processor/app.log'
878
+ )
879
+ ----
880
+
881
+ ==== 2. Metrics
882
+
883
+ Export to monitoring systems:
884
+
885
+ [source,ruby]
886
+ ----
887
+ # Prometheus
888
+ File.write('metrics.prom', monitor.to_prometheus)
889
+
890
+ # JSON for custom systems
891
+ File.write('metrics.json', monitor.to_json)
892
+ ----
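If you serve the `/metrics` endpoint from Step 7 instead of writing files, Prometheus can scrape it directly. A sketch of the scrape job (job name and interval are illustrative; Prometheus defaults to the `/metrics` path):

[source,yaml]
----
scrape_configs:
  - job_name: 'job_processor'
    scrape_interval: 30s
    static_configs:
      - targets: ['localhost:9090']
----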
893
+
894
+ ==== 3. Resource Limits
895
+
896
+ Configure worker pools based on resources:
897
+
898
+ [source,ruby]
899
+ ----
900
+ worker_pools: [
901
+ { worker_class: CPUIntensiveWorker, num_workers: 4 }, # CPU-bound
902
+ { worker_class: IOBoundWorker, num_workers: 10 } # I/O-bound
903
+ ]
904
+ ----
905
+
906
+ === Summary
907
+
908
+ You've built a production-ready background job processor with:
909
+
910
+ ✓ Continuous operation with graceful shutdown
911
+ ✓ Multiple worker types for different jobs
912
+ ✓ Thread-safe job submission
913
+ ✓ Health monitoring and metrics
914
+ ✓ HTTP endpoints for monitoring
915
+ ✓ Production deployment configs
916
+
917
+ **Key takeaways:**
918
+
919
+ 1. Use ContinuousServer for simplified long-running services
920
+ 2. Implement WorkQueue for dynamic job submission
921
+ 3. Add health monitoring with PerformanceMonitor
922
+ 4. Provide HTTP endpoints for external monitoring
923
+ 5. Use proper signal handling for graceful shutdown
924
+ 6. Deploy with systemd or Docker for reliability
925
+
926
+ === Next Steps
927
+
928
+ * Try the link:../complex-workflows/[Implementing Complex Workflows] tutorial
929
+ * Learn about link:../../guides/workflows/[Workflows] for job dependencies
930
+ * Explore link:../../reference/error-reporting/[Error Reporting] for production monitoring
931
+ * Check production patterns in link:../../guides/continuous-mode/[Continuous Mode]