workflow_manager 0.5.5 → 0.6.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: c6828da4a43654aa14bb9a4592a42bdc3d0084daf98e7ab29cb11d94928b54de
-  data.tar.gz: 87122a9b3e395f12535e700d80568328876f8784917da762956d7abbdd4a77c0
+  metadata.gz: fe50e7ccd102e4cd531cf2afb47e1222ca0b06e434ab34fdfff6a0f3a0fd35c3
+  data.tar.gz: fd07c0b863627a1a8065e7f710931ba99c0609e5ef9e9e82f107c35eee678b68
 SHA512:
-  metadata.gz: a27da5bd02d7d6a8130f9bb8844c3b4a640153a37751e0e7bd34fd64d0c13caaf0e0aade85e34ec4699a3307fe8df6624562e9a0cd48858861ec543bd3c1c910
-  data.tar.gz: 6f043fe95d6911baffbd0f606f5790d60651a013cea213caf1728632267987f09a20e161ea186f372eaaf66f7823f07e323990268bdc28ec669f37623f02b409
+  metadata.gz: 6a9701699b79fd61f889b86dbc3060fe63f6d5e8b6e2532dabb0635b5c3f37f335563dfeda5ba677328ff88e1c2197df60c84e9c9526b4a316eac751b9e3b939
+  data.tar.gz: d42a9d888a80e7f8b549a64595d13e3c268499d4f78d00805c070c6d60feb9febd20f335e7e8af398f910f6e1eade7b60cf200e83d99451230a4e0f37e134e0f
@@ -1,7 +1,7 @@
 #!/usr/bin/env ruby
 # encoding: utf-8
 # 20121112 masa workflow manager client
-Version = '20130517-111334'
+Version = '20200522-134606'
 
 require 'drb/drb'
 
@@ -28,3 +28,4 @@ if wfmrc
 end
 workflow_manager = DRbObject.new_with_uri(uri)
 puts workflow_manager.hello
+puts workflow_manager.cluster_node_list
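For context on the hunk above: this client script talks to the workflow manager over DRb, so DRbObject.new_with_uri(uri) returns a proxy and calls such as hello and cluster_node_list are forwarded to the front object registered by the server process. A minimal, illustrative sketch of that pattern follows; the DemoWorkflowManager class, the port, and the return values are assumptions made for the sketch, not the gem's actual server code.

#!/usr/bin/env ruby
# Minimal DRb front-object sketch (illustrative only, not the gem's server).
require 'drb/drb'

class DemoWorkflowManager
  def hello
    'hello'
  end
  def cluster_node_list
    # The cluster classes in this release describe nodes with strings such as
    # 'fgcz-h-110: cpu 8,mem 30 GB,scr 500G'; a real server would return those.
    'fgcz-h-110: cpu 8,mem 30 GB,scr 500G'
  end
end

uri = 'druby://localhost:12345'
DRb.start_service(uri, DemoWorkflowManager.new)
puts "listening on #{uri}"
DRb.thread.join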
@@ -1,7 +1,7 @@
 #!/usr/bin/env ruby
 # encoding: utf-8
 # 20121112 masa workflow manager client
-Version = '20160317-153614'
+Version = '20200722-161135'
 
 require 'drb/drb'
 require 'workflow_manager/optparse_ex'
@@ -16,6 +16,7 @@ opt = OptionParser.new do |o|
   o.on(:nodes, '-n nodes', '--nodes', 'Comma separated list of nodes to submit to for g-sub')
   o.on(:ram, '-r RAM', '--RAM', 'Amount of RAM to request in Gigs for g-sub')
   o.on(:scratch, '-s scratch', '--scratch', 'Amount of scratch space to request in Gigs for g-sub')
+  o.on(:queue, '-q queue', '--queue', 'Queue name')
   o.parse!(ARGV)
 end
 unless script_file = ARGV[0] and script_file =~ /\.sh/
@@ -308,6 +308,7 @@ module WorkflowManager
     end
   end
 
+
   class HydraCluster < Cluster
     def submit_job(script_file, script_content, option='')
       # TODO
@@ -358,10 +359,11 @@ module WorkflowManager
         new_job_script_base = File.basename(new_job_script)
         log_file = File.join(@log_dir, new_job_script_base + "_o.log")
         err_file = File.join(@log_dir, new_job_script_base + "_e.log")
-        #command = "g-sub -o #{log_file} -e #{err_file} #{option} #{new_job_script}"
-        command = "sbatch -o #{log_file} -e #{err_file} #{option} #{new_job_script}"
+        command = "g-sub -o #{log_file} -e #{err_file} -q course #{option} #{new_job_script}"
+        #command = "sbatch -o #{log_file} -e #{err_file} #{new_job_script}"
         job_id = `#{command}`
-        job_id = job_id.match(/Your job (\d+) \(/)[1]
+        #job_id = job_id.match(/Your job (\d+) \(/)[1]
+        job_id = job_id.chomp.split.last
         [job_id, log_file, command]
       else
         err_msg = "FGCZDevian10Cluster#submit_job, ERROR: script_name is not *.sh: #{File.basename(script_file)}"
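In the hunk above, FGCZDevian10Cluster#submit_job goes back to submitting through g-sub, and the job-id extraction is loosened: instead of a regex tied to Grid Engine's "Your job NNN (...)" message, the code now takes the last whitespace-separated token of whatever the submit command prints. The new FGCZDebian10Cluster below reuses the same last-token parse with sbatch, whose "Submitted batch job NNN" output ends in the id. A small sketch of the two parses on illustrative output strings (the strings are examples, not captured from this cluster):

# Illustrative submission outputs; real text depends on the scheduler/wrapper.
sge_output   = %Q{Your job 206 ("test.sh") has been submitted}
slurm_output = "Submitted batch job 206\n"

# 0.5.5 parse: regex bound to the Grid Engine message format.
puts sge_output.match(/Your job (\d+) \(/)[1]   # => 206

# 0.6.0 parse: last token of the output, which is the job id as long as the
# submit command (e.g. sbatch, or a wrapper that echoes the id last) ends with it.
puts slurm_output.chomp.split.last              # => 206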
@@ -432,4 +434,119 @@ module WorkflowManager
       }
     end
   end
+
+  class FGCZDebian10Cluster < Cluster
+    def parse(options)
+      options = options.split
+      ram = if i = options.index("-r")
+        options[i+1]
+      end
+      cores = if i = options.index("-c")
+        options[i+1]
+      end
+      scratch = if i = options.index("-s")
+        options[i+1]
+      end
+      partition = if i = options.index("-p")
+        options[i+1]
+      end
+      new_options = []
+      new_options << "--mem=#{ram}G" if ram
+      new_options << "-n #{cores}" if cores
+      new_options << "--tmp=#{scratch}G" if scratch
+      new_options << "-p #{partition}" if partition
+      new_options.join(" ")
+    end
+    def submit_job(script_file, script_content, option='')
+      if script_name = File.basename(script_file) and script_name =~ /\.sh/
+        script_name = script_name.split(/\.sh/).first + ".sh"
+        new_job_script = generate_new_job_script(script_name, script_content)
+        new_job_script_base = File.basename(new_job_script)
+        log_file = File.join(@log_dir, new_job_script_base + "_o.log")
+        err_file = File.join(@log_dir, new_job_script_base + "_e.log")
+        #command = "g-sub -o #{log_file} -e #{err_file} -q user #{option} #{new_job_script}"
+        sbatch_options = parse(option)
+        command = "sbatch -o #{log_file} -e #{err_file} -N 1 #{sbatch_options} #{new_job_script}"
+        puts command
+        job_id = `#{command}`
+        job_id = job_id.chomp.split.last
+        [job_id, log_file, command]
+      else
+        err_msg = "FGCZDebian10Cluster#submit_job, ERROR: script_name is not *.sh: #{File.basename(script_file)}"
+        warn err_msg
+        raise err_msg
+      end
+    end
+    def job_running?(job_id)
+      qstat_flag = false
+      IO.popen('squeue') do |io|
+        while line=io.gets
+          # ["JOBID", "PARTITION", "NAME", "USER", "ST", "TIME", "NODES", "NODELIST(REASON)"]
+          # ["206", "employee", "test.sh", "masaomi", "R", "0:03", "1", "fgcz-h-030"]
+          jobid, partition, name, user, state, *others = line.chomp.split
+          if jobid.strip == job_id and state == 'R'
+            qstat_flag = true
+            break
+          end
+        end
+      end
+      qstat_flag
+    end
+    def job_ends?(log_file)
+      log_flag = false
+      IO.popen("tail -n 10 #{log_file} 2> /dev/null") do |io|
+        while line=io.gets
+          if line =~ /__SCRIPT END__/
+            log_flag = true
+            break
+          end
+        end
+      end
+      log_flag
+    end
+    def job_pending?(job_id)
+      qstat_flag = false
+      IO.popen('squeue') do |io|
+        while line=io.gets
+          jobid, partition, name, user, state, *others = line.chomp.split
+          if jobid.strip == job_id and state =~ /PD/
+            qstat_flag = true
+            break
+          end
+        end
+      end
+      qstat_flag
+    end
+    def copy_commands(org_dir, dest_parent_dir, now=nil)
+      commands = if now == "force"
+        target_file = File.join(dest_parent_dir, File.basename(org_dir))
+        ["g-req copynow -f #{org_dir} #{dest_parent_dir}"]
+      elsif now
+        ["g-req copynow #{org_dir} #{dest_parent_dir}"]
+      else
+        ["g-req -w copy #{org_dir} #{dest_parent_dir}"]
+      end
+    end
+    def kill_command(job_id)
+      command = "scancel #{job_id}"
+    end
+    def delete_command(target)
+      command = "g-req remove #{target}"
+    end
+    def cluster_nodes
+      nodes = {
+        'fgcz-h-110: cpu 8,mem 30 GB,scr 500G' => 'fgcz-h-110',
+        'fgcz-h-111: cpu 8,mem 30 GB,scr 400G' => 'fgcz-h-111',
+      }
+    end
+  end
+
+  class FGCZDebian10DemoCluster < FGCZDebian10Cluster
+    def copy_commands(org_dir, dest_parent_dir, now=nil)
+      commands = ["cp -r #{org_dir} #{dest_parent_dir}"]
+    end
+    def delete_command(target)
+      command = "rm -rf #{target}"
+    end
+  end
 end
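The new FGCZDebian10Cluster#parse added above translates the g-sub-style flags the rest of the gem passes around (-r RAM in GB, -c cores, -s scratch in GB, -p partition) into sbatch options before submission. Below is a standalone sketch of that translation, with the same logic copied out so it can be run without the gem; the sample option string is invented:

#!/usr/bin/env ruby
# Standalone copy of the option translation performed by the new
# FGCZDebian10Cluster#parse; the sample option string below is made up.
def parse(options)
  options = options.split
  ram = if i = options.index("-r")
    options[i+1]
  end
  cores = if i = options.index("-c")
    options[i+1]
  end
  scratch = if i = options.index("-s")
    options[i+1]
  end
  partition = if i = options.index("-p")
    options[i+1]
  end
  new_options = []
  new_options << "--mem=#{ram}G" if ram
  new_options << "-n #{cores}" if cores
  new_options << "--tmp=#{scratch}G" if scratch
  new_options << "-p #{partition}" if partition
  new_options.join(" ")
end

puts parse("-r 30 -c 8 -s 100 -p employee")
# => --mem=30G -n 8 --tmp=100G -p employee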
@@ -1,3 +1,3 @@
 module WorkflowManager
-  VERSION = "0.5.5"
+  VERSION = "0.6.0"
 end
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: workflow_manager
 version: !ruby/object:Gem::Version
-  version: 0.5.5
+  version: 0.6.0
 platform: ruby
 authors:
 - Functional Genomics Center Zurich
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2020-06-03 00:00:00.000000000 Z
+date: 2020-12-18 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: bundler