ragdoll-rails 0.1.9 → 0.1.11
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/app/assets/javascripts/ragdoll/application.js +129 -0
- data/app/assets/javascripts/ragdoll/bulk_upload_status.js +454 -0
- data/app/assets/stylesheets/ragdoll/application.css +84 -0
- data/app/assets/stylesheets/ragdoll/bulk_upload_status.css +379 -0
- data/app/channels/application_cable/channel.rb +6 -0
- data/app/channels/application_cable/connection.rb +6 -0
- data/app/channels/ragdoll/bulk_upload_status_channel.rb +27 -0
- data/app/channels/ragdoll/file_processing_channel.rb +26 -0
- data/app/components/ragdoll/alert_component.html.erb +4 -0
- data/app/components/ragdoll/alert_component.rb +32 -0
- data/app/components/ragdoll/application_component.rb +6 -0
- data/app/components/ragdoll/card_component.html.erb +15 -0
- data/app/components/ragdoll/card_component.rb +21 -0
- data/app/components/ragdoll/document_list_component.html.erb +41 -0
- data/app/components/ragdoll/document_list_component.rb +13 -0
- data/app/components/ragdoll/document_table_component.html.erb +76 -0
- data/app/components/ragdoll/document_table_component.rb +13 -0
- data/app/components/ragdoll/empty_state_component.html.erb +12 -0
- data/app/components/ragdoll/empty_state_component.rb +17 -0
- data/app/components/ragdoll/flash_messages_component.html.erb +3 -0
- data/app/components/ragdoll/flash_messages_component.rb +37 -0
- data/app/components/ragdoll/navbar_component.html.erb +24 -0
- data/app/components/ragdoll/navbar_component.rb +31 -0
- data/app/components/ragdoll/page_header_component.html.erb +13 -0
- data/app/components/ragdoll/page_header_component.rb +15 -0
- data/app/components/ragdoll/stats_card_component.html.erb +11 -0
- data/app/components/ragdoll/stats_card_component.rb +17 -0
- data/app/components/ragdoll/status_badge_component.html.erb +3 -0
- data/app/components/ragdoll/status_badge_component.rb +30 -0
- data/app/controllers/ragdoll/api/v1/analytics_controller.rb +72 -0
- data/app/controllers/ragdoll/api/v1/base_controller.rb +29 -0
- data/app/controllers/ragdoll/api/v1/documents_controller.rb +148 -0
- data/app/controllers/ragdoll/api/v1/search_controller.rb +87 -0
- data/app/controllers/ragdoll/api/v1/system_controller.rb +97 -0
- data/app/controllers/ragdoll/application_controller.rb +17 -0
- data/app/controllers/ragdoll/configuration_controller.rb +82 -0
- data/app/controllers/ragdoll/dashboard_controller.rb +98 -0
- data/app/controllers/ragdoll/documents_controller.rb +460 -0
- data/app/controllers/ragdoll/documents_controller_backup.rb +68 -0
- data/app/controllers/ragdoll/jobs_controller.rb +116 -0
- data/app/controllers/ragdoll/search_controller.rb +368 -0
- data/app/jobs/application_job.rb +9 -0
- data/app/jobs/ragdoll/bulk_document_processing_job.rb +280 -0
- data/app/jobs/ragdoll/process_file_job.rb +166 -0
- data/app/services/ragdoll/worker_health_service.rb +111 -0
- data/app/views/layouts/ragdoll/application.html.erb +162 -0
- data/app/views/ragdoll/dashboard/analytics.html.erb +333 -0
- data/app/views/ragdoll/dashboard/index.html.erb +208 -0
- data/app/views/ragdoll/documents/edit.html.erb +91 -0
- data/app/views/ragdoll/documents/index.html.erb +302 -0
- data/app/views/ragdoll/documents/new.html.erb +1518 -0
- data/app/views/ragdoll/documents/show.html.erb +188 -0
- data/app/views/ragdoll/documents/upload_results.html.erb +248 -0
- data/app/views/ragdoll/jobs/index.html.erb +669 -0
- data/app/views/ragdoll/jobs/show.html.erb +129 -0
- data/app/views/ragdoll/search/index.html.erb +324 -0
- data/config/cable.yml +12 -0
- data/config/routes.rb +56 -1
- data/lib/ragdoll/rails/engine.rb +32 -1
- data/lib/ragdoll/rails/version.rb +1 -1
- metadata +86 -1
@@ -0,0 +1,280 @@
|
|
1
|
+
# frozen_string_literal: true

module Ragdoll
  # Background job that ingests a batch of uploaded files into Ragdoll and
  # streams progress to the browser over ActionCable on the
  # "bulk_upload_status_<session_id>" channel.
  class BulkDocumentProcessingJob < ApplicationJob
    queue_as :default

    # Files processed per slice; GC runs between slices to bound memory use.
    BATCH_SIZE = 10

    # Processes every uploaded file and broadcasts per-file and final status.
    #
    # @param session_id [String] key of the ActionCable status channel
    # @param file_paths_data [Array<Hash>, nil] entries with :temp_path and
    #   :original_filename keys
    # @param force_duplicate [Boolean] forwarded to Ragdoll.add_document
    # @return [void]
    def perform(session_id, file_paths_data, force_duplicate = false)
      start_time = Time.current

      # Initialize early so the rescue block at the bottom can safely
      # reference these even if we fail before the main loop.
      total_files = file_paths_data&.size || 0
      processed_count = 0
      failed_files = []

      safe_log_operation("bulk_processing_start", {
        session_id: session_id,
        file_count: total_files,
        force_duplicate: force_duplicate,
        job_id: job_id
      })

      logger.info "🚀 Starting bulk document processing job for session #{session_id}"
      logger.info "📁 Processing #{total_files} files"

      # Early return if no files to process.
      if file_paths_data.nil? || file_paths_data.empty?
        logger.warn "⚠️ No files provided for processing in session #{session_id}"
        broadcast_status_update(session_id, {
          type: 'upload_error',
          error: 'No files provided for processing',
          status: 'failed'
        })
        return
      end

      # Broadcast upload start.
      broadcast_status_update(session_id, {
        type: 'upload_start',
        total_files: total_files,
        status: 'processing',
        started_at: Time.current.iso8601
      })

      total_batches = (total_files.to_f / BATCH_SIZE).ceil

      file_paths_data.each_slice(BATCH_SIZE).with_index do |file_batch, batch_index|
        logger.info "📦 Processing batch #{batch_index + 1} of #{total_batches}"

        file_batch.each do |file_data|
          file_start_time = Time.current
          temp_path = file_data[:temp_path]
          original_filename = file_data[:original_filename]

          begin
            safe_log_operation("file_processing_start", {
              session_id: session_id,
              filename: original_filename,
              temp_path: temp_path,
              file_exists: File.exist?(temp_path),
              file_size: File.exist?(temp_path) ? File.size(temp_path) : 0
            })

            unless File.exist?(temp_path)
              error_msg = "Temporary file not found: #{temp_path}"
              safe_log_error("file_processing", StandardError.new(error_msg), {
                session_id: session_id,
                filename: original_filename,
                temp_path: temp_path
              })
              next
            end

            logger.info "🔄 Processing file: #{original_filename}"

            # Broadcast file start.
            broadcast_status_update(session_id, {
              type: 'file_start',
              filename: original_filename,
              processed: processed_count,
              total: total_files,
              percentage: ((processed_count.to_f / total_files) * 100).round(1),
              status: 'processing',
              batch_index: batch_index + 1,
              total_batches: total_batches
            })

            # Process the document through the Ragdoll engine.
            ragdoll_start_time = Time.current
            result = ::Ragdoll.add_document(path: temp_path, force: force_duplicate)
            ragdoll_duration = Time.current - ragdoll_start_time

            safe_log_performance("ragdoll_add_document", ragdoll_duration, {
              session_id: session_id,
              filename: original_filename,
              result_success: result && result[:success],
              force_duplicate: force_duplicate
            })

            file_duration = Time.current - file_start_time

            if result && result[:success]
              processed_count += 1

              safe_log_operation("file_processing_success", {
                session_id: session_id,
                filename: original_filename,
                document_id: result[:document_id],
                processing_duration: file_duration.round(3),
                processed_count: processed_count,
                total_files: total_files
              })

              logger.info "✅ Successfully processed: #{original_filename}"

              # Broadcast success.
              broadcast_status_update(session_id, {
                type: 'file_complete',
                filename: original_filename,
                processed: processed_count,
                total: total_files,
                percentage: ((processed_count.to_f / total_files) * 100).round(1),
                status: 'completed',
                document_id: result[:document_id],
                processing_time: file_duration.round(3)
              })
            else
              failed_files << original_filename
              error_message = result ? result[:error] : 'Unknown error'

              safe_log_error("file_processing", StandardError.new(error_message), {
                session_id: session_id,
                filename: original_filename,
                processing_duration: file_duration.round(3),
                ragdoll_result: result,
                temp_path: temp_path,
                # BUGFIX: guard File.size — the file may already be gone,
                # and raising here would mask the real processing failure.
                file_size: File.exist?(temp_path) ? File.size(temp_path) : nil
              })

              logger.error "❌ Failed to process: #{original_filename} - #{error_message}"

              # Broadcast error.
              broadcast_status_update(session_id, {
                type: 'file_error',
                filename: original_filename,
                processed: processed_count,
                total: total_files,
                percentage: ((processed_count.to_f / total_files) * 100).round(1),
                status: 'failed',
                error: error_message,
                processing_time: file_duration.round(3)
              })
            end
          rescue => e
            failed_files << (original_filename || 'unknown file')
            file_duration = Time.current - file_start_time

            safe_log_error("file_processing_exception", e, {
              session_id: session_id,
              filename: original_filename,
              temp_path: temp_path,
              processing_duration: file_duration.round(3),
              file_data: file_data,
              processed_count: processed_count,
              total_files: total_files
            })

            logger.error "💥 Exception processing file #{original_filename}: #{e.message}"
            logger.error e.backtrace.join("\n")

            # BUGFIX: broadcast on the same status channel as every other
            # update in this job (previously went to
            # "ragdoll_file_processing_<sid>", which bulk-upload subscribers
            # never listen on).
            broadcast_status_update(session_id, {
              type: 'file_error',
              filename: original_filename,
              processed: processed_count,
              total: total_files,
              percentage: ((processed_count.to_f / total_files) * 100).round(1),
              status: 'failed',
              error: e.message
            })
          ensure
            # BUGFIX: clean up the temp file even when processing raised
            # (cleanup previously lived in the begin body and leaked on error).
            File.delete(temp_path) if temp_path && File.exist?(temp_path)
          end
        end

        # Force garbage collection after each batch.
        GC.start

        # Small delay between batches to prevent overwhelming the system.
        sleep(0.1)
      end

      # Broadcast final completion.
      total_duration = Time.current - start_time
      broadcast_status_update(session_id, {
        type: 'upload_complete',
        processed: processed_count,
        total: total_files,
        failed: failed_files.size,
        failed_files: failed_files,
        percentage: 100.0,
        status: 'completed',
        total_duration: total_duration.round(3),
        completed_at: Time.current.iso8601
      })

      safe_log_operation("bulk_processing_complete", {
        session_id: session_id,
        total_files: total_files,
        processed_count: processed_count,
        failed_count: failed_files.size,
        failed_files: failed_files,
        total_duration: total_duration.round(3),
        avg_file_duration: total_files > 0 ? (total_duration / total_files).round(3) : 0
      })

      logger.info "🎉 Bulk processing completed for session #{session_id}"
      logger.info "📊 Results: #{processed_count}/#{total_files} successful, #{failed_files.size} failed"
    rescue => e
      total_duration = Time.current - start_time

      safe_log_error("bulk_processing_job_failure", e, {
        session_id: session_id,
        total_files: total_files,
        processed_count: processed_count,
        failed_count: failed_files.size,
        total_duration: total_duration.round(3),
        job_id: job_id
      })

      logger.error "💀 Bulk processing job failed for session #{session_id}: #{e.message}"
      logger.error e.backtrace.join("\n")

      # Broadcast job failure.
      broadcast_status_update(session_id, {
        type: 'upload_error',
        error: e.message,
        status: 'failed',
        processed: processed_count,
        total: total_files,
        failed_at: Time.current.iso8601,
        total_duration: total_duration.round(3)
      })
    end

    private

    # Pushes +data+ to the session's bulk-upload ActionCable channel;
    # broadcast failures are logged, never raised.
    def broadcast_status_update(session_id, data)
      ActionCable.server.broadcast("bulk_upload_status_#{session_id}", data)
    rescue => e
      logger.error "Failed to broadcast status update: #{e.message}"
    end

    # Best-effort structured logging; no-op when RagdollLogging is absent.
    def safe_log_operation(operation, details = {})
      return unless defined?(RagdollLogging)
      RagdollLogging.log_operation(operation, details)
    rescue => e
      logger.debug "Failed to log operation #{operation}: #{e.message}"
    end

    # Best-effort structured error logging; no-op when RagdollLogging is absent.
    def safe_log_error(operation, error, details = {})
      return unless defined?(RagdollLogging)
      RagdollLogging.log_error(operation, error, details)
    rescue => e
      logger.debug "Failed to log error #{operation}: #{e.message}"
    end

    # Best-effort timing metrics; no-op when RagdollLogging is absent.
    def safe_log_performance(operation, duration, details = {})
      return unless defined?(RagdollLogging)
      RagdollLogging.log_performance(operation, duration, details)
    rescue => e
      logger.debug "Failed to log performance #{operation}: #{e.message}"
    end
  end
end
|
@@ -0,0 +1,166 @@
|
|
1
|
+
# frozen_string_literal: true

module Ragdoll
  # Background job that ingests a single uploaded file into Ragdoll,
  # broadcasting progress over the "ragdoll_file_processing_<session_id>"
  # ActionCable channel and mirroring job state into
  # JobFailureMonitorService when that service is available.
  class ProcessFileJob < ApplicationJob
    queue_as :default

    # @param file_id [String] client-side identifier echoed in every broadcast
    # @param session_id [String] ActionCable channel key
    # @param filename [String] original (display) name of the upload
    # @param temp_path [String] path to the temporary file on disk
    # @raise [RuntimeError] when the temp file is missing or processing fails;
    #   re-raised so the job backend records the failure
    def perform(file_id, session_id, filename, temp_path)
      # BUGFIX: the filename was previously emitted as the literal text
      # "#(unknown)" instead of being interpolated.
      ::Rails.logger.info "🚀 Ragdoll::ProcessFileJob starting: file_id=#{file_id}, session_id=#{session_id}, filename=#{filename}"
      ::Rails.logger.info "📁 Temp file path: #{temp_path}"
      ::Rails.logger.info "📊 Temp file exists: #{File.exist?(temp_path)}"
      ::Rails.logger.info "📏 Temp file size: #{File.exist?(temp_path) ? File.size(temp_path) : 'N/A'} bytes"

      begin
        # Verify temp file exists before processing.
        unless File.exist?(temp_path)
          raise "Temporary file not found: #{temp_path}"
        end

        # Broadcast start.
        broadcast_data = {
          file_id: file_id,
          filename: filename,
          status: 'started',
          progress: 0,
          message: 'Starting file processing...'
        }

        ::Rails.logger.info "📡 Broadcasting start: #{broadcast_data}"
        begin
          ActionCable.server.broadcast("ragdoll_file_processing_#{session_id}", broadcast_data)
          ::Rails.logger.info "✅ ActionCable broadcast sent successfully"

          # Track job start in monitoring system.
          track_job_progress(session_id, file_id, filename, 0, 'started')
        rescue => broadcast_error
          ::Rails.logger.error "❌ ActionCable broadcast failed: #{broadcast_error.message}"
          ::Rails.logger.error broadcast_error.backtrace.first(3)
        end

        broadcast_progress(session_id, file_id, filename, 25, 'Reading file...')
        track_job_progress(session_id, file_id, filename, 25, 'processing')

        # Use Ragdoll to add document.
        result = ::Ragdoll.add_document(path: temp_path)

        broadcast_progress(session_id, file_id, filename, 75, 'Generating embeddings...')
        track_job_progress(session_id, file_id, filename, 75, 'processing')

        if result[:success] && result[:document_id]
          document = ::Ragdoll::Document.find(result[:document_id])

          # Broadcast completion.
          completion_data = {
            file_id: file_id,
            filename: filename,
            status: 'completed',
            progress: 100,
            message: 'Processing completed successfully',
            document_id: document.id
          }

          ::Rails.logger.info "🎉 Broadcasting completion: #{completion_data}"
          begin
            ActionCable.server.broadcast("ragdoll_file_processing_#{session_id}", completion_data)
            ::Rails.logger.info "✅ Completion broadcast sent successfully"

            # Mark job as completed in monitoring system.
            mark_job_completed(session_id, file_id)
          rescue => broadcast_error
            ::Rails.logger.error "❌ Completion broadcast failed: #{broadcast_error.message}"
          end
        else
          raise "Processing failed: #{result[:error] || 'Unknown error'}"
        end
      rescue => e
        ::Rails.logger.error "💥 Ragdoll::ProcessFileJob error: #{e.message}"
        ::Rails.logger.error e.backtrace.first(5)

        # Broadcast error.
        error_data = {
          file_id: file_id,
          filename: filename,
          status: 'error',
          progress: 0,
          message: "Error: #{e.message}"
        }

        ::Rails.logger.info "📡 Broadcasting error: #{error_data}"
        begin
          ActionCable.server.broadcast("ragdoll_file_processing_#{session_id}", error_data)
          ::Rails.logger.info "✅ Error broadcast sent successfully"

          # Mark job as failed in monitoring system.
          mark_job_failed(session_id, file_id)
        rescue => broadcast_error
          # BUGFIX: this rescue previously rebound `e`, so the `raise e`
          # below re-raised the broadcast failure instead of the original
          # processing error.
          ::Rails.logger.error "❌ Error broadcast failed: #{broadcast_error.message}"
        end

        # Re-raise the original error to mark the job as failed.
        raise e
      ensure
        # ALWAYS clean up the temp file, success or failure.
        if temp_path && File.exist?(temp_path)
          ::Rails.logger.info "🧹 Cleaning up temp file: #{temp_path}"
          begin
            File.delete(temp_path)
            ::Rails.logger.info "✅ Temp file deleted successfully"
          rescue => cleanup_error
            ::Rails.logger.error "❌ Failed to delete temp file: #{cleanup_error.message}"
          end
        else
          ::Rails.logger.info "📝 Temp file already cleaned up or doesn't exist: #{temp_path}"
        end
      end
    end

    private

    # Broadcasts an intermediate progress update, then pauses briefly.
    # NOTE(review): the sleep appears to simulate processing time for the
    # UI — confirm whether it is still wanted in production.
    def broadcast_progress(session_id, file_id, filename, progress, message)
      broadcast_data = {
        file_id: file_id,
        filename: filename,
        status: 'processing',
        progress: progress,
        message: message
      }

      ::Rails.logger.info "📡 Broadcasting progress: #{broadcast_data}"
      begin
        ActionCable.server.broadcast("ragdoll_file_processing_#{session_id}", broadcast_data)
        ::Rails.logger.info "✅ Progress broadcast sent successfully"
      rescue => broadcast_error
        ::Rails.logger.error "❌ Progress broadcast failed: #{broadcast_error.message}"
      end

      # Small delay to simulate processing time.
      sleep(0.5)
    end

    # Records progress in the monitor; no-op when the service is absent.
    def track_job_progress(session_id, file_id, filename, progress, status)
      if defined?(JobFailureMonitorService)
        JobFailureMonitorService.track_job_progress(session_id, file_id, filename, progress, status)
      end
    rescue => e
      ::Rails.logger.error "❌ Failed to track job progress: #{e.message}"
    end

    # Marks the job completed in the monitor; no-op when the service is absent.
    def mark_job_completed(session_id, file_id)
      if defined?(JobFailureMonitorService)
        JobFailureMonitorService.mark_job_completed(session_id, file_id)
      end
    rescue => e
      ::Rails.logger.error "❌ Failed to mark job as completed: #{e.message}"
    end

    # Marks the job failed in the monitor; no-op when the service is absent.
    def mark_job_failed(session_id, file_id)
      if defined?(JobFailureMonitorService)
        JobFailureMonitorService.mark_job_failed(session_id, file_id)
      end
    rescue => e
      ::Rails.logger.error "❌ Failed to mark job as failed: #{e.message}"
    end
  end
end
|
@@ -0,0 +1,111 @@
|
|
1
|
+
# frozen_string_literal: true

module Ragdoll
  # Health checks over the SolidQueue background-job system. Every query is
  # guarded with defined?/rescue so the service degrades gracefully when
  # SolidQueue is not loaded (e.g. a different ActiveJob adapter is in use).
  class WorkerHealthService
    class << self
      # @return [Hash] snapshot with :status, :workers, :queues, :timestamp,
      #   or an error hash (:status => 'error', :error) when a check raises
      def check_worker_health
        {
          status: 'healthy',
          workers: worker_status,
          queues: queue_status,
          timestamp: Time.current
        }
      rescue => e
        {
          status: 'error',
          error: e.message,
          timestamp: Time.current
        }
      end

      # @return [Boolean] true when many jobs look stuck or no worker has a
      #   recent heartbeat; false when the check itself fails
      def needs_restart?
        stuck_jobs_count > 5 || !workers_running?
      rescue
        false
      end

      # Marks up to +limit+ long-unfinished jobs (> 1 hour old) as finished
      # so they stop counting as stuck.
      #
      # @param limit [Integer] maximum number of jobs to clear
      # @return [Integer] number of jobs marked finished (0 on error or
      #   when SolidQueue is unavailable)
      def process_stuck_jobs!(limit = 10)
        processed = 0

        if defined?(SolidQueue::Job)
          stuck_jobs = SolidQueue::Job
            .where(finished_at: nil)
            .where('created_at < ?', 1.hour.ago)
            .limit(limit)

          stuck_jobs.each do |job|
            job.update(finished_at: Time.current)
            processed += 1
          end
        end

        processed
      rescue => e
        ::Rails.logger.error "Failed to process stuck jobs: #{e.message}"
        0
      end

      # Requests a worker restart. In development this is a logged no-op;
      # production deployments are expected to override this behavior.
      #
      # @return [Boolean] true on success, false if the request fails
      def restart_workers!
        ::Rails.logger.info "Worker restart requested (no-op in development)"
        true
      rescue => e
        ::Rails.logger.error "Failed to restart workers: #{e.message}"
        false
      end

      private

      # @return [Hash] worker counts (:count, :active); zeros when
      #   SolidQueue is absent, plus :error when the query itself raises
      def worker_status
        if defined?(SolidQueue::Worker)
          {
            count: SolidQueue::Worker.count,
            active: SolidQueue::Worker.where('last_heartbeat_at > ?', 5.minutes.ago).count
          }
        else
          { count: 0, active: 0 }
        end
      rescue
        { count: 0, active: 0, error: 'Unable to check worker status' }
      end

      # @return [Hash] queue depth (:pending, :completed, :failed); zeros
      #   when SolidQueue is absent, plus :error when the query raises
      def queue_status
        if defined?(SolidQueue::Job)
          {
            pending: SolidQueue::Job.where(finished_at: nil).count,
            completed: SolidQueue::Job.where.not(finished_at: nil).count,
            failed: defined?(SolidQueue::FailedExecution) ? SolidQueue::FailedExecution.count : 0
          }
        else
          { pending: 0, completed: 0, failed: 0 }
        end
      rescue
        { pending: 0, completed: 0, failed: 0 }
      end

      # @return [Integer] unfinished jobs older than one hour (0 when
      #   SolidQueue is absent or the query fails)
      def stuck_jobs_count
        return 0 unless defined?(SolidQueue::Job)

        SolidQueue::Job
          .where(finished_at: nil)
          .where('created_at < ?', 1.hour.ago)
          .count
      rescue
        0
      end

      # @return [Boolean] whether any worker heartbeat is fresher than
      #   five minutes
      def workers_running?
        # BUGFIX: previously returned false when SolidQueue::Worker was not
        # defined, which made needs_restart? report true on every check even
        # though there were no SolidQueue workers to restart. Match the
        # rescue policy below: assume workers are running when we can't check.
        return true unless defined?(SolidQueue::Worker)

        SolidQueue::Worker
          .where('last_heartbeat_at > ?', 5.minutes.ago)
          .exists?
      rescue
        true # Assume workers are running if we can't check
      end
    end
  end
end
|