naf 2.1.12 → 2.1.13
This diff shows the changes between two publicly released versions of the package, as published to the supported public registries. It is provided for informational purposes only.
- data/Gemfile +4 -3
- data/RELEASE_NOTES.rdoc +17 -4
- data/app/assets/images/download.png +0 -0
- data/app/assets/javascripts/dataTablesTemplates/jobs.js +37 -0
- data/app/controllers/naf/historical_jobs_controller.rb +30 -0
- data/app/controllers/naf/log_parsers_controller.rb +9 -0
- data/app/helpers/naf/application_helper.rb +1 -1
- data/app/models/logical/naf/application_schedule.rb +7 -3
- data/app/models/logical/naf/log_parser/base.rb +1 -0
- data/app/models/logical/naf/log_parser/job.rb +4 -3
- data/app/models/logical/naf/log_parser/job_downloader.rb +156 -0
- data/app/models/logical/naf/log_parser/runner.rb +4 -3
- data/app/models/logical/naf/metric_sender.rb +62 -0
- data/app/models/process/naf/database_models_cleanup.rb +91 -0
- data/app/models/process/naf/runner.rb +52 -35
- data/app/views/naf/historical_jobs/_button_control.html.erb +64 -0
- data/app/views/naf/historical_jobs/index.json.erb +26 -5
- data/app/views/naf/historical_jobs/show.html.erb +20 -29
- data/app/views/naf/log_viewer/_job_log_download_button.html.erb +11 -0
- data/app/views/naf/log_viewer/_job_logs.html.erb +3 -0
- data/app/views/naf/log_viewer/_log_display.html.erb +4 -4
- data/app/views/naf/log_viewer/_log_layout.html.erb +5 -0
- data/config/routes.rb +4 -0
- data/lib/naf.rb +8 -0
- data/lib/naf/configuration.rb +5 -1
- data/lib/naf/version.rb +1 -1
- data/naf.gemspec +5 -2
- data/spec/controllers/naf/log_parsers_controller_spec.rb +35 -0
- data/spec/models/logical/naf/application_schedule_spec.rb +41 -0
- data/spec/models/logical/naf/construction_zone/boss_spec.rb +5 -0
- data/spec/models/logical/naf/construction_zone/foreman_spec.rb +6 -3
- data/spec/models/logical/naf/job_downloader_spec.rb +72 -0
- data/spec/models/logical/naf/job_statuses/errored_spec.rb +33 -0
- data/spec/models/logical/naf/job_statuses/finished_less_minute_spec.rb +33 -0
- data/spec/models/logical/naf/job_statuses/finished_spec.rb +33 -0
- data/spec/models/logical/naf/job_statuses/queued_spec.rb +34 -0
- data/spec/models/logical/naf/job_statuses/running_spec.rb +37 -0
- data/spec/models/logical/naf/job_statuses/terminated_spec.rb +33 -0
- data/spec/models/logical/naf/job_statuses/waiting_spec.rb +33 -0
- metadata +80 -6
data/app/models/process/naf/database_models_cleanup.rb
@@ -0,0 +1,91 @@
+#
+# This Naf Process Script will cleanup the invalid Naf state by removing data associated
+# with several models. Therefore, it should only be used on staging and development. Naf
+# can get in a bad state when database dumps or snapshots are taken while runners are still up.
+#
+module Process::Naf
+  class DatabaseModelsCleanup < ::Process::Naf::Application
+
+    opt :options_list, 'description of options'
+    opt :job, 'cleanup data related to jobs'
+    opt :runner, 'cleanup data related to runners'
+    opt :machine, 'cleanup data related to machines'
+    opt :all, 'cleanup data related to jobs, runners, and machines'
+
+    def work
+      if @options_list.present?
+        puts "DESCRIPTION\n\tThe following options are available:\n\n" +
+          "\t--job\t\t->\tRemoves data related to jobs.\n\n" +
+          "\t--runner\t->\tRemoves data related to runners. Job flag (--job) needs to be present.\n\n" +
+          "\t--machine\t->\tRemoves data related to machines. Runner flag (--runner) needs to be present.\n\n" +
+          "\t--all\t\t->\tRemoves data related to jobs, runners, and machines."
+
+      elsif @all.present?
+        cleanup_jobs
+        cleanup_runners
+        cleanup_machines
+
+      elsif can_cleanup?
+        cleanup(true)
+      end
+    end
+
+    private
+
+    def can_cleanup?
+      cleanup
+    end
+
+    def cleanup(data_removal = false)
+      if @job.present?
+        cleanup_jobs if data_removal
+
+        if @runner.present?
+          cleanup_runners if data_removal
+          if @machine.present?
+            cleanup_machines if data_removal
+          end
+
+        elsif @machine.present?
+          logger.error "--runner flag must be present"
+          return false
+        end
+      elsif @runner.present? || @machine.present?
+        logger.error "--job flag must be present"
+        return false
+      else
+        return false
+      end
+
+      return true
+    end
+
+    def cleanup_jobs
+      logger.info "Starting to remove job data..."
+      ::Naf::HistoricalJobAffinityTab.delete_all
+      ::Naf::HistoricalJobPrerequisite.delete_all
+      ::Naf::QueuedJob.delete_all
+      ::Naf::RunningJob.delete_all
+      ::Naf::HistoricalJob.delete_all
+      logger.info "Finished removing job data..."
+    end
+
+    def cleanup_runners
+      logger.info "Starting to remove runner data..."
+      ::Naf::MachineRunnerInvocation.delete_all
+      ::Naf::MachineRunner.delete_all
+      logger.info "Finished removing runner data..."
+    end
+
+    def cleanup_machines
+      logger.info "Starting to remove machine data..."
+      ::Naf::MachineAffinitySlot.delete_all
+      ::Naf::Affinity.where(
+        affinity_classification_id: ::Naf::AffinityClassification.machine.id
+      ).delete_all
+      ::Naf::Machine.delete_all
+      logger.info "Finished removing machine data..."
+    end
+
+  end
+end
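
Read together, can_cleanup? and cleanup form a two-pass check: the first call (cleanup with data_removal = false) only validates the flag combination, and cleanup(true) performs the deletions. The dependency chain is that --runner requires --job, and --machine requires --runner. A minimal standalone restatement of that rule (a hypothetical helper for illustration, not part of the gem):

    # Hypothetical restatement of the flag dependency chain enforced by #cleanup:
    def flags_valid?(job:, runner: false, machine: false)
      return false unless job              # --runner and --machine both presuppose --job
      return false if machine && !runner   # --machine additionally presupposes --runner
      true
    end

    flags_valid?(job: true)                              # => true
    flags_valid?(job: true, runner: true, machine: true) # => true
    flags_valid?(job: false, runner: true)               # => false ("--job flag must be present")
    flags_valid?(job: true, machine: true)               # => false ("--runner flag must be present")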
data/app/models/process/naf/runner.rb
@@ -57,6 +57,7 @@ module Process::Naf
                       "#{af_name}.yml",
                       "#{af_name}-#{Rails.env}.yml"]
       @last_machine_log_level = nil
+      @metric_send_delay = ::Naf.configuration.metric_send_delay
     end
 
     def work
@@ -64,9 +65,11 @@ module Process::Naf
 
       @machine = ::Naf::Machine.find_by_server_address(@server_address)
 
+      @metric_sender = ::Logical::Naf::MetricSender.new(@metric_send_delay, @machine)
+
       unless machine.present?
-        logger.fatal
-        logger.fatal
+        logger.fatal "This machine is not configued correctly (ipaddress: #{@server_address})."
+        logger.fatal "Please update #{::Naf::Machine.table_name} with an entry for this machine."
         logger.fatal "Exiting..."
         exit 1
       end
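
metric_send_delay is a new configuration option in this release (see the lib/naf/configuration.rb +5 -1 entry in the summary above). A hedged initializer sketch; it assumes ::Naf.configuration exposes a plain attribute writer for the option, which this diff does not show:

    # config/initializers/naf.rb -- hypothetical sketch, the value is illustrative:
    ::Naf.configuration.metric_send_delay = 120 # seconds between runner metric sends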
@@ -101,7 +104,7 @@ module Process::Naf
       ::Naf::RunningJob.
         joins("INNER JOIN #{Naf.schema_name}.historical_jobs AS hj ON hj.id = #{Naf.schema_name}.running_jobs.id").
         where('finished_at IS NOT NULL AND hj.started_on_machine_id = ?', @machine.id).readonly(false).each do |job|
-        logger.debug
+        logger.debug "removing invalid job #{job.inspect}"
         job.delete
       end
     end
@@ -133,11 +136,11 @@ module Process::Naf
       if invocation.dead_at.blank?
         begin
           retval = Process.kill(0, invocation.pid)
-          logger.detail
+          logger.detail "#{retval} = kill(0, #{invocation.pid}) -- process alive, marking runner invocation as winding down"
           invocation.wind_down_at = Time.zone.now
           invocation.save!
         rescue Errno::ESRCH
-          logger.detail
+          logger.detail "ESRCH = kill(0, #{invocation.pid}) -- marking runner invocation as not running"
           invocation.dead_at = Time.zone.now
           invocation.save!
           terminate_old_processes(invocation)
@@ -184,7 +187,7 @@ module Process::Naf
       # Make sure no processes are thought to be running on this machine
       terminate_old_processes(machine) if @kill_all_runners
 
-      logger.info
+      logger.info "working: #{machine}"
 
       @children = {}
 
@@ -207,10 +210,10 @@ module Process::Naf
 
       # Check machine status
       if !machine.enabled
-        logger.warn
+        logger.warn "this machine is disabled #{machine}"
         return false
       elsif machine.marked_down
-        logger.warn
+        logger.warn "this machine is marked down #{machine}"
         return false
       end
 
@@ -230,12 +233,25 @@ module Process::Naf
         start_new_jobs
       end
 
+      send_metrics
+
       cleanup_dead_children
       cleanup_old_processes(1.week, 75.minutes) if (Time.zone.now - @last_cleaned_up_processes) > 1.hour
 
       return true
     end
 
+    def send_metrics
+      # Only send metrics if not winding down, or winding down and only runner.
+      logger.debug "checking whether it's time to send metrics"
+      @current_invocation.reload
+      if @current_invocation.wind_down_at.present?
+        return nil if @machine.machine_runners.running.count > 0
+      end
+      logger.debug "sending metrics"
+      @metric_sender.send_metrics
+    end
+
     def check_log_level
       if machine.log_level != @last_machine_log_level
         @last_machine_log_level = machine.log_level
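
The guard at the top of send_metrics means: a runner invocation that is winding down stays silent while any runner on the machine is still marked running, so at most the last surviving runner reports. Restated as a standalone predicate (hypothetical, for illustration only):

    # Hypothetical restatement of the wind-down guard in #send_metrics:
    def should_send_metrics?(winding_down, running_runner_count)
      return true unless winding_down # normal operation: always send
      running_runner_count.zero?      # winding down: send only if no runner is still marked running
    end

    should_send_metrics?(false, 3) # => true
    should_send_metrics?(true, 3)  # => false
    should_send_metrics?(true, 0)  # => true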
@@ -246,7 +262,7 @@ module Process::Naf
     end
 
     def check_schedules
-      logger.debug
+      logger.debug "last time schedules were checked: #{::Naf::Machine.last_time_schedules_were_checked}"
       if ::Naf::Machine.is_it_time_to_check_schedules?(@check_schedules_period.minutes)
         logger.debug "it's time to check schedules"
         if ::Naf::ApplicationSchedule.try_lock_schedules
@@ -256,7 +272,7 @@ module Process::Naf
 
           # check scheduled tasks
           ::Naf::ApplicationSchedule.should_be_queued.each do |application_schedule|
-            logger.info
+            logger.info "scheduled application: #{application_schedule}"
             begin
               naf_boss = ::Logical::Naf::ConstructionZone::Boss.new
               # this doesn't work very well for run_group_limits in the thousands
@@ -264,18 +280,18 @@ module Process::Naf
                 naf_boss.enqueue_application_schedule(application_schedule)
               end
             rescue ::Naf::HistoricalJob::JobPrerequisiteLoop => jpl
-              logger.error
+              logger.error "#{machine} couldn't queue schedule because of prerequisite loop: #{jpl.message}"
               logger.warn jpl
               application_schedule.enabled = false
               application_schedule.save!
-              logger.alarm
+              logger.alarm "Application Schedule disabled due to loop: #{application_schedule}"
             end
           end
 
           # check the runner machines
           ::Naf::Machine.enabled.up.each do |runner_to_check|
             if runner_to_check.is_stale?(@runner_stale_period.minutes)
-              logger.alarm
+              logger.alarm "runner is stale for #{@runner_stale_period} minutes, #{runner_to_check}"
               runner_to_check.mark_machine_down(machine)
             end
           end
@@ -299,19 +315,19 @@ module Process::Naf
           check_dead_children_not_exited_properly
           break
         rescue Errno::ECHILD => e
-          logger.error
+          logger.error "#{machine} No child when we thought we had children #{@children.inspect}"
           logger.warn e
           pid = @children.first.try(:first)
           status = nil
-          logger.warn
+          logger.warn "pulling first child off list to clean it up: pid=#{pid}"
         end
 
         if pid
           begin
             cleanup_dead_child(pid, status)
           rescue ActiveRecord::ActiveRecordError => are
-            logger.error
-            logger.error
+            logger.error "Failure during cleaning up of dead child with pid: #{pid}, status: #{status}"
+            logger.error "#{are.message}"
           rescue StandardError => e
             # XXX just incase a job control failure -- more code here
             logger.error "some failure during child clean up"
@@ -338,7 +354,7 @@ module Process::Naf
       end
 
       unless dead_children.blank?
-        logger.error
+        logger.error "#{machine}: dead children even with timeout during waitpid2(): #{dead_children.inspect}"
         logger.warn "this isn't necessarily incorrect -- look for the pids to be cleaned up next round, if not: call it a bug"
       end
     end
@@ -351,25 +367,31 @@ module Process::Naf
         child_job.remove_tags([::Naf::HistoricalJob::SYSTEM_TAGS[:work]])
 
         if status.nil? || status.exited? || status.signaled?
-          logger.info {
+          logger.info { "cleaning up dead child: #{child_job.inspect}" }
           finish_job(child_job,
                      { exit_status: (status && status.exitstatus), termination_signal: (status && status.termsig) })
+          if status && status.exitstatus > 0 && !child_job.request_to_terminate
+            @metric_sender.statsd.event("Naf Job Error",
+                                        "#{child_job.inspect} finished with non-zero exit status.",
+                                        alert_type: "error",
+                                        tags: (::Naf.configuration.metric_tags << "naf:joberror"))
+          end
         else
           # this can happen if the child is sigstopped
-          logger.warn
+          logger.warn "child waited for did not exit: #{child_job.inspect}, status: #{status.inspect}"
         end
       else
         # XXX ERROR no child for returned pid -- this can't happen
-        logger.warn
+        logger.warn "child pid: #{pid}, status: #{status.inspect}, not managed by this runner"
       end
     end
 
     def start_new_jobs
-      logger.detail
+      logger.detail "starting new jobs, num children: #{@children.length}/#{machine.thread_pool_size}"
       while ::Naf::RunningJob.where(started_on_machine_id: machine.id).count < machine.thread_pool_size &&
           memory_available_to_spawn? && current_invocation.wind_down_at.blank?
 
-        logger.debug_gross
+        logger.debug_gross "fetching jobs because: children: #{@children.length} < #{machine.thread_pool_size} (poolsize)"
         begin
           running_job = @job_fetcher.fetch_next_job
 
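
::Logical::Naf::MetricSender is added in this release (metric_sender.rb +62 -0) but its body is not part of this diff; the statsd.event call above matches the event API of Datadog's dogstatsd-ruby gem (event(title, text, opts), with :alert_type and :tags options). A standalone sketch of emitting an equivalent event directly, assuming that gem:

    # Hypothetical standalone sketch using dogstatsd-ruby; MetricSender's real
    # internals are not shown in this diff.
    require 'datadog/statsd'

    statsd = Datadog::Statsd.new('localhost', 8125) # default dogstatsd agent address
    statsd.event('Naf Job Error',
                 'job 42 finished with non-zero exit status.', # illustrative text
                 alert_type: 'error',
                 tags: ['naf:joberror'])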
@@ -378,7 +400,7 @@ module Process::Naf
             break
           end
 
-          logger.info
+          logger.info "starting new job : #{running_job.inspect}"
 
           pid = running_job.historical_job.spawn
           if pid.present?
@@ -389,10 +411,10 @@ module Process::Naf
             running_job.historical_job.machine_runner_invocation_id = current_invocation.id
             running_job.save!
             running_job.historical_job.save!
-            logger.info
+            logger.info "job started : #{running_job.inspect}"
           else
             # should never get here (well, hopefully)
-            logger.error
+            logger.error "#{machine}: failed to execute #{running_job.inspect}"
 
             finish_job(running_job, { failed_to_start: true })
           end
@@ -400,7 +422,7 @@ module Process::Naf
           raise
         rescue StandardError => e
           # XXX rescue for various issues
-          logger.error
+          logger.error "#{machine}: failure during job start"
           logger.warn e
         end
       end
@@ -476,7 +498,7 @@ module Process::Naf
 
       logger.info "number of old jobs to sift through: #{jobs.length}"
       jobs.each do |job|
-        logger.detail
+        logger.detail "job still around: #{job.inspect}"
         if job.request_to_terminate == false
           logger.warn "politely asking process: #{job.pid} to terminate itself"
           job.request_to_terminate = true
@@ -500,7 +522,7 @@ module Process::Naf
         return
       end
       jobs.each do |job|
-        logger.warn
+        logger.warn "sending SIG_TERM to process: #{job.inspect}"
         send_signal_and_maybe_clean_up(job, "TERM")
       end
 
@@ -514,7 +536,7 @@ module Process::Naf
 
       # kill with fire
       assigned_jobs(record).each do |job|
-        logger.alarm
+        logger.alarm "sending SIG_KILL to process: #{job.inspect}"
         send_signal_and_maybe_clean_up(job, "KILL")
 
         # job force job down
@@ -595,10 +617,5 @@ module Process::Naf
 
       sreclaimable
     end
-
-    def escape_html(str)
-      CGI::escapeHTML(str)
-    end
-
   end
 end
data/app/views/naf/historical_jobs/_button_control.html.erb
@@ -0,0 +1,64 @@
+<% content_for :javascripts do %>
+  <script type='text/javascript'>
+    jQuery(document).ready(function () {
+      jQuery(document).delegate('.terminate', "click", function(){
+        var answer = confirm("You are terminating this job. Are you sure you want to do this?");
+        if (!answer) {
+          return false;
+        }
+        var id = <%= historical_job_id %>;
+        jQuery.ajax({
+          url: id,
+          type:'POST',
+          dataType:'json',
+          data:{ "historical_job[request_to_terminate]": 1, "historical_job_id": id, "_method": "put" },
+          success:function (data) {
+            if (data.success) {
+              var title = data.title ? data.title : data.command
+              jQuery("<p id='notice'>A Job " + title + " was terminated!</p>").
+                appendTo('#flash_message').slideDown().delay(5000).slideUp();
+              setTimeout('window.location.reload()', 5600);
+            }
+          }
+        });
+      });
+      jQuery(document).delegate('.re-enqueue', "click", function(){
+        var url = jQuery(this).attr('content');
+        var new_params = { data: jQuery(this).attr('data') };
+        new_params['job_id'] = jQuery(this).attr('id');
+
+        if (jQuery(this).attr('app_id')) {
+          new_params['app_id'] = jQuery(this).attr('app_id');
+        }
+
+        if (jQuery(this).attr('link')) {
+          new_params['link'] = jQuery(this).attr('link');
+        }
+
+        if (jQuery(this).attr('title_name')) {
+          new_params['title_name'] = jQuery(this).attr('title_name');
+        }
+
+        var answer = confirm("Would you like to enqueue this job?");
+
+        if (!answer) {
+          return false;
+        }
+
+        jQuery.post(url, new_params, function (data) {
+          if (data.success) {
+            jQuery("<p id='notice'>Congratulations, a Job " + data.title + " was added!</p>").
+              appendTo('#flash_message').slideDown().delay(5000).slideUp();
+            setTimeout('window.location.reload()', 5600);
+          }
+          else {
+            jQuery("<div class='error'>Sorry, \'" + data.title +
+              "\' cannot add a Job to the queue right now!</div>").
+              appendTo('#flash_message').slideDown().delay(5000).slideUp();
+            jQuery('#datatable').dataTable().fnDraw();
+          }
+        });
+      });
+    });
+  </script>
+<% end %>
data/app/views/naf/historical_jobs/index.json.erb
@@ -2,6 +2,7 @@
 rows = @historical_jobs.each do |job|
   historical_job = ::Naf::HistoricalJob.find_by_id(job[0])
   if job[1].present? && historical_job.present? && historical_job.machine_runner_invocation.present?
+    runner_path_name = job[1]
     invocation = historical_job.machine_runner_invocation
     if invocation.status != 'dead' && job[10] == 'Running'
       job[1] = "<div class='" + invocation.status + "'>" + job[1] + "</div>".html_safe
@@ -9,18 +10,38 @@
   end
 
   job[12] = link_to image_tag('job.png',
-
-
-
-
+                              class: 'action',
+                              title: "View job(id: #{job[0]}, title: #{job[4]}) log"),
+                    url_for({ controller: 'log_viewer', action: 'index', record_id: job[0], record_type: 'job' }),
+                    { target: '_blank', id: job[0] }
+
+  if runner_path_name.present?
+    job[12] << " ".html_safe
+    job[12] << (link_to image_tag('download.png',
+                                  class: 'action',
+                                  title: "Download all logs for job(id: #{job[0]}, title: #{job[4]}"),
+                        "#{http_protocol}#{runner_path_name}#{naf.download_log_parsers_path}?record_id=#{job[0]}&record_type=job")
+  end
 
   if job[10] == "Running" || job[10] == 'Queued' || job[10] == 'Waiting'
     job[12] << " ".html_safe
     job[12] << (link_to image_tag('terminate.png',
                                   class: 'action',
                                   title: "Terminate job(id: #{job[0]}, title: #{job[4]})"),
-                "#", { class: "terminate", id: job[0]})
+                "#", { class: "terminate", id: job[0]}, content: "#{naf.historical_jobs_path}")
+  elsif job[10] != 'Terminating'
+    # This re-enqueue link is handled by assets/javascripts/dataTableTemplates/jobs.js
+    params = { class: "re-enqueue", id: historical_job.id, content: "#{naf.historical_jobs_path}/reenqueue"}
+    if historical_job.application_id.present?
+      params[:app_id] = historical_job.application_id
+    end
+    job[12] << " ".html_safe
+    job[12] << (link_to image_tag('control_play_blue.png',
+                                  class: 'action',
+                                  title: "Re-enqueue one instance of job #{job[4]}"),
+                "#", params )
   end
+
 end
 %>
 <%= raw rows %>