workflow_manager 0.7.2 → 0.7.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/Gemfile +2 -1
- data/config/environments/redis.conf +1 -1
- data/lib/job_checker.rb +5 -5
- data/lib/workflow_manager/server.rb +31 -10
- data/lib/workflow_manager/version.rb +1 -1
- data/start_workflow_manager.sh +11 -0
- data/test/job_list.rb +11 -4
- metadata +3 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: e2da7b0c22f10f971c6dc3eb21c89b8321a8f52c2bda0bd42917c20a625ce995
+  data.tar.gz: 2ae4ffec963b02a83f37a3034af4af3835e9b27e018d05b7294979e716b3ec47
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 120bd8ae2bbae899b096d630b7cc050f828b0004c7cf9432cf7d4e16113d4c71d542c5ab8d32fbdb953199e5ae491aaadac0950759b371810b7c84170fc01f1f
+  data.tar.gz: 2c82352987cfcbfced6fe32bbe6a38544788b4d686277bd785a67a719ad22fef052102623388aec7bece99c059b7eb9c921bd07de695097cde7a87177f2ad26e
data/Gemfile
CHANGED
data/lib/job_checker.rb
CHANGED
@@ -40,15 +40,15 @@ class JobChecker
     end
     new_job_script
   end
-  def update_time_status(status, script_basename, user, project_number)
+  def update_time_status(status, script_basename, user, project_number, next_dataset_id)
     unless @start_time
       @start_time = Time.now.strftime("%Y-%m-%d %H:%M:%S")
     end
     time = Time.now.strftime("%Y-%m-%d %H:%M:%S")
-    [status, script_basename, [@start_time, time].join("/"), user, project_number].join(',')
+    [status, script_basename, [@start_time, time].join("/"), user, project_number, next_dataset_id].join(',')
   end
 
-  def perform(job_id, script_basename, log_file, user, project_id)
+  def perform(job_id, script_basename, log_file, user, project_id, next_dataset_id=nil)
     puts "JobID (in JobChecker): #{job_id}"
     db0 = Redis.new(port: PORT, db: 0) # state + alpha DB
     db1 = Redis.new(port: PORT, db: 1) # log DB
@@ -63,10 +63,10 @@ class JobChecker
     #print ret
     state = ret.split(/\n/).last.strip
     #puts "state: #{state}"
-    db0[job_id] = update_time_status(state, script_basename, user, project_id)
+    db0[job_id] = update_time_status(state, script_basename, user, project_id, next_dataset_id)
 
     unless state == pre_state
-      db0[job_id] = update_time_status(state, script_basename, user, project_id)
+      db0[job_id] = update_time_status(state, script_basename, user, project_id, next_dataset_id)
       project_jobs = eval((db2[project_id]||[]).to_s)
      project_jobs = Hash[*project_jobs]
      project_jobs[job_id] = state
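Taken together, these hunks extend the job status record that JobChecker writes to Redis db 0 with a sixth comma-separated field. A minimal sketch of the record format, reusing the sample values from the recovery-check comment in server.rb below; the "dataset_42" next_dataset_id is a hypothetical example, not taken from the gem:

    # Sketch only: the status record stored in db0.
    record = ["RUNNING",                                 # job state
              "QC_ventricles_100k.sh",                   # script_basename
              "2021-07-30 09:47:04/2021-07-30 09:47:04", # start time / last update
              "masaomi",                                 # user
              "1535",                                    # project_number
              "dataset_42"].join(',')                    # next_dataset_id (new field)

    # Recovery parses the record back apart; an old five-field record
    # simply yields nil for next_dataset_id.
    stat, script, time, user, project, next_dataset_id = record.split(",")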
data/lib/workflow_manager/server.rb
CHANGED
@@ -163,6 +163,7 @@ module WorkflowManager
         RedisDB.new(1, @redis_conf)
       end
       @jobs = RedisDB.new(2, @redis_conf)
+      @trees = RedisDB.new(4, @redis_conf)
 
       @system_log = File.join(@log_dir, "system.log")
       @mutex = Mutex.new
@@ -177,6 +178,7 @@ module WorkflowManager
       log_puts("Cluster = #{@cluster.name}")
       log_puts("Server starts")
       log_puts("Recovery check")
+      sleep 2
       recovery_job_checker
     end
     def recovery_job_checker
@@ -185,12 +187,12 @@ module WorkflowManager
       statuses.each do |job_id, status|
         # puts [job_id, status].join(",")
         # 120249,RUNNING,QC_ventricles_100k.sh,2021-07-30 09:47:04/2021-07-30 09:47:04,masaomi,1535
-        stat, script_basename, time, user, project_number = status.split(",")
+        stat, script_basename, time, user, project_number, next_dataset_id = status.split(",")
         if stat == "RUNNING" or stat == "PENDING"
           log_file = logs[job_id]
           log_puts("JobID (in recovery check): #{job_id}")
           puts "JobID (in recovery check): #{job_id}"
-          JobChecker.perform_async(job_id, script_basename, log_file, user, project_number)
+          JobChecker.perform_async(job_id, script_basename, log_file, user, project_number, next_dataset_id)
         end
       end
     end
@@ -295,7 +297,7 @@ module WorkflowManager
         Thread.current.kill
       end
     end
-    def start_monitoring3(script_path, script_content, user='sushi_lover', project_number=0, sge_options='', log_dir='')
+    def start_monitoring3(script_path, script_content, user='sushi_lover', project_number=0, sge_options='', log_dir='', next_dataset_id='')
       script_basename = File.basename(script_path)
       job_id, log_file, command = @cluster.submit_job(script_path, script_content, sge_options)
       #p command
@@ -303,7 +305,7 @@ module WorkflowManager
       #p job_id
       puts "JobID (in WorkflowManager): #{job_id}"
       sleep 1
-      JobChecker.perform_async(job_id, script_basename, log_file, user, project_number)
+      JobChecker.perform_async(job_id, script_basename, log_file, user, project_number, next_dataset_id)
       job_id
     end
     def start_monitoring2(script_path, script_content, user='sushi_lover', project_number=0, sge_options='', log_dir='')
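Because the manager runs as a dRuby service (see the start script later in this diff), the extra parameter is supplied by the remote caller. A hypothetical client sketch; the URI, paths, and IDs are illustrative, not taken from the gem:

    require 'drb/drb'

    DRb.start_service
    script_path = '/path/to/script.sh'
    server = DRbObject.new_with_uri('druby://localhost:40001')
    job_id = server.start_monitoring3(script_path,
                                      File.read(script_path),  # script_content
                                      'sushi_lover',           # user
                                      1535,                    # project_number
                                      '',                      # sge_options
                                      '/path/to/logs',         # log_dir
                                      'dataset_42')            # next_dataset_id (new)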
@@ -472,17 +474,23 @@ module WorkflowManager
       job_idsh = if job_ids
                    Hash[*(job_ids.split(',')).map{|job_id| [job_id, true]}.flatten]
                  end
-
-
+      if project_number
+        s_ = {}
         @jobs.transaction do |jobs|
           if project_jobs = jobs[project_number]
             s_ = Hash[*eval(project_jobs)]
           end
         end
-
-
-
-
+        @statuses.transaction do |statuses|
+          s_.each do |job_id, stat|
+            s << [job_id, statuses[job_id]]
+          end
+        end
+      else
+        @statuses.transaction do |statuses|
+          statuses.each do |key, value|
+            s << [key, value]
+          end
         end
       end
       if job_ids
@@ -548,6 +556,19 @@ module WorkflowManager
     def cluster_node_list
       @cluster.node_list
     end
+    def save_dataset_tree(project_number, json)
+      @trees.transaction do |trees|
+        trees[project_number] = json
+      end
+      json
+    end
+    def load_dataset_tree(project_number)
+      json = nil
+      @trees.transaction do |trees|
+        json = trees[project_number]
+      end
+      json
+    end
   end
 end
 
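The new save_dataset_tree/load_dataset_tree pair gives clients a per-project slot for a JSON tree, backed by the @trees handle on Redis db 4 that the constructor now opens. A minimal sketch of a dRuby round trip, assuming an illustrative URI and payload:

    require 'drb/drb'

    DRb.start_service
    server = DRbObject.new_with_uri('druby://localhost:40001')  # illustrative URI
    json = '[{"id":"root","parent":"#","text":"p1535"}]'        # illustrative tree
    server.save_dataset_tree(1535, json)  # stores and returns the JSON string
    server.load_dataset_tree(1535)        # returns it again, or nil if unset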
data/start_workflow_manager.sh
ADDED
@@ -0,0 +1,11 @@
+#!/usr/bin/bash
+source /usr/local/ngseq/etc/lmod_profile
+module load Dev/Ruby/2.6.7
+module load Tools/Redis/6.0.1
+conda activate gtools_env
+which python
+which g-sub
+which g-req
+mkdir -p logs
+mkdir -p dbs
+bundle exec workflow_manager -d druby://fgcz-h-031:40001
data/test/job_list.rb
CHANGED
@@ -1,13 +1,13 @@
 #!/usr/bin/env ruby
 # encoding: utf-8
-# Version = '
+# Version = '20211001-104513'
 
 PORT = (ARGV[0]||6380).to_i
 require 'redis'
 db0 = Redis.new(port: PORT, db: 0)
 db1 = Redis.new(port: PORT, db: 1)
 db2 = Redis.new(port: PORT, db: 2)
-
+db4 = Redis.new(port: PORT, db: 4)
 
 class Redis
   def show_all
@@ -18,8 +18,8 @@ class Redis
   end
 end
 
-dbs = [db0, db1, db2]
-db_notes = ["state DB", "log DB", "project job DB"]
+dbs = [db0, db1, db2, db4]
+db_notes = ["state DB", "log DB", "project job DB", "JS tree DB"]
 
 dbs.each.with_index do |db, i|
   note = db_notes[i]
@@ -48,3 +48,10 @@ db2.keys.sort.each do |key|
   value = db2.get(key)
   puts [key, value].join("\t")
 end
+
+puts
+puts "db3, status DB3, project specific"
+db3.keys.sort.each do |key|
+  value = db3.get(key)
+  puts [key, value].join("\t")
+end
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: workflow_manager
 version: !ruby/object:Gem::Version
-  version: 0.7.2
+  version: 0.7.6
 platform: ruby
 authors:
 - Functional Genomics Center Zurich
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2021-
+date: 2021-10-01 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: bundler
@@ -87,6 +87,7 @@ files:
 - spec/cluster_spec.rb
 - spec/server_spec.rb
 - spec/spec_helper.rb
+- start_workflow_manager.sh
 - test/call_worker4.rb
 - test/call_worker_method.rb
 - test/job_list.rb