mobilize-base 1.3 → 1.21

Sign up to get free protection for your applications and to get access to all the features.
data/README.md CHANGED
@@ -220,8 +220,9 @@ production:
220
220
 
221
221
  gsheet.yml needs:
222
222
  * max_cells, which is the number of cells a sheet is allowed to have
223
- written to it at one time. Default is 50k cells, which is about how
224
- much you can write before things start breaking.
223
+ written to it at one time. Default is 400k cells, which is the max per
224
+ book. Google Drive will throw its own exception if
225
+ you try to write more than that.
225
226
  * Because Google Docs ties date formatting to the Locale for the
226
227
  spreadsheet, there are 2 date format parameters:
227
228
  * read_date_format, which is the format that should be read FROM google
@@ -355,16 +356,22 @@ mobilize_base:resque_web task, as detailed in [Start Resque-Web](#section_Start_
355
356
  Mobilize stores cached data in MongoDB Gridfs.
356
357
  It needs the below parameters, which can be found in the [lib/samples][git_samples] folder.
357
358
 
359
+ * max_versions - the number of __different__ versions of data to keep
360
+ for a given cache. Default is 10. This is meant mostly to allow you to
361
+ restore Runners from cache if necessary.
358
362
  * max_compressed_write_size - the amount of compressed data Gridfs will
359
363
  allow. If you try to write more than this, an exception will be thrown.
360
364
 
361
365
  ``` yml
362
366
  ---
363
367
  development:
368
+ max_versions: 10 #number of versions of cache to keep in gridfs
364
369
  max_compressed_write_size: 1000000000 #~1GB
365
370
  test:
371
+ max_versions: 10 #number of versions of cache to keep in gridfs
366
372
  max_compressed_write_size: 1000000000 #~1GB
367
373
  production:
374
+ max_versions: 10 #number of versions of cache to keep in gridfs
368
375
  max_compressed_write_size: 1000000000 #~1GB
369
376
  ```
370
377
 
@@ -557,14 +564,8 @@ the Runner itself.
557
564
  and "base1.out" for the second test. The first
558
565
  takes the output from the first stage and the second reads it straight
559
566
  from the referenced sheet.
560
- * All stages accept retry parameters:
561
- * retries: an integer specifying the number of times that the system will try it again before giving up.
562
- * delay: an integer specifying the number of seconds between retries.
563
- * always_on: if false, turns the job off on stage failures.
564
- Otherwise the job will retry from the beginning with the same frequency as the Runner refresh rate.
565
- * notify: by default, the stage owner will be notified on failure.
566
- * if false, will not notify the stage owner in the event of a failure.
567
- * If it's an email address, will email the specified person.
567
+ * All stages accept a "retries" parameter, an integer specifying the number of times the system will retry the stage before
568
+ giving up.
568
569
  * If a stage fails after all retries, it will output its standard error to a tab in the Runner with the name of the job, the name of the stage, and a ".err" extension
569
570
  * The tab will be headed "response" and will contain the exception and backtrace for the error.
570
571
  * The test uses "Requestor_mobilize(test)/base1.out" and
@@ -13,7 +13,7 @@ module GoogleDrive
13
13
  f = self
14
14
  #admin includes workers
15
15
  return true if f.has_admin_acl?
16
- accounts = (Mobilize::Gdrive.admin_emails + Mobilize::Gdrive.worker_emails).uniq
16
+ accounts = (Mobilize::Gdrive.admin_emails + Mobilize::Gdrive.worker_emails)
17
17
  accounts.each do |email|
18
18
  f.update_acl(email)
19
19
  end
@@ -21,9 +21,9 @@ module GoogleDrive
21
21
 
22
22
  def has_admin_acl?
23
23
  f = self
24
- curr_emails = f.acls.map{|a| a.scope}.compact.sort
25
- admin_emails = (Mobilize::Gdrive.admin_emails + Mobilize::Gdrive.worker_emails).uniq
26
- if curr_emails == admin_emails or (curr_emails & admin_emails) == admin_emails
24
+ curr_emails = f.acls.map{|a| a.scope}.sort
25
+ admin_emails = (Mobilize::Gdrive.admin_emails + Mobilize::Gdrive.worker_emails)
26
+ if (curr_emails & admin_emails) == admin_emails
27
27
  return true
28
28
  else
29
29
  return false
@@ -32,9 +32,9 @@ module GoogleDrive
32
32
 
33
33
  def has_worker_acl?
34
34
  f = self
35
- curr_emails = f.acls.map{|a| a.scope}.compact.sort
35
+ curr_emails = f.acls.map{|a| a.scope}.sort
36
36
  worker_emails = Mobilize::Gdrive.worker_emails.sort
37
- if curr_emails == worker_emails or (curr_emails & worker_emails) == worker_emails
37
+ if (curr_emails & worker_emails) == worker_emails
38
38
  return true
39
39
  else
40
40
  return false
@@ -84,7 +84,7 @@ module GoogleDrive
84
84
  end
85
85
  def acl_entry(email)
86
86
  f = self
87
- f.acls.select{|a| ['group','user'].include?(a.scope_type) and a.scope and a.scope == email}.first
87
+ f.acls.select{|a| ['group','user'].include?(a.scope_type) and a.scope == email}.first
88
88
  end
89
89
  def entry_hash
90
90
  f = self
@@ -1,6 +1,6 @@
1
1
  module GoogleDrive
2
2
  class Worksheet
3
- def to_tsv(gsub_line_breaks="\n")
3
+ def to_tsv
4
4
  sheet = self
5
5
  rows = sheet.rows
6
6
  header = rows.first
@@ -8,12 +8,7 @@ module GoogleDrive
8
8
  #look for blank cols to indicate end of row
9
9
  col_last_i = (header.index("") || header.length)-1
10
10
  #ignore user-entered line breaks for purposes of tsv reads
11
- out_tsv = rows.map do |r|
12
- row = r[0..col_last_i].join("\t")
13
- row.gsub!("\n",gsub_line_breaks)
14
- row = row + "\n"
15
- row
16
- end.join + "\n"
11
+ out_tsv = rows.map{|r| r[0..col_last_i].join("\t").gsub("\n","")+"\n"}.join + "\n"
17
12
  out_tsv.tsv_convert_dates(Mobilize::Gsheet.config['sheet_date_format'],
18
13
  Mobilize::Gsheet.config['read_date_format'])
19
14
  end
@@ -11,19 +11,11 @@ class String
11
11
  def opp
12
12
  pp self
13
13
  end
14
- def to_md5
15
- Digest::MD5.hexdigest(self)
16
- end
17
14
  def bash(except=true)
18
- str = self
19
- out_str,err_str = []
20
- status = Open4.popen4(str) do |pid,stdin,stdout,stderr|
21
- out_str = stdout.read
22
- err_str = stderr.read
23
- end
24
- exit_status = status.exitstatus
25
- raise err_str if (exit_status !=0 and except==true)
26
- return out_str
15
+ pid,stdin,stdout,stderr = Open4.popen4(self)
16
+ pid,stdin = [nil,nil]
17
+ raise stderr.read if (stderr.read.length>0 and except==true)
18
+ return stdout.read
27
19
  end
28
20
  def escape_regex
29
21
  str = self
@@ -10,16 +10,13 @@ module YAML
10
10
  #make sure urls have their colon spaces fixed
11
11
  result_hash={}
12
12
  easy_hash.each do |k,v|
13
- #fucking yaml puts spaces in front of the key
14
- #or something
15
- strip_k = k.strip
16
- result_hash[strip_k] = if v.class==String
17
- v.gsub(": //","://")
18
- elsif v.class==Array
19
- v.map{|av| av.to_s.gsub(": //","://")}
20
- else
21
- v
22
- end
13
+ result_hash[k] = if v.class==String
14
+ v.gsub(": //","://")
15
+ elsif v.class==Array
16
+ v.map{|av| av.to_s.gsub(": //","://")}
17
+ else
18
+ v
19
+ end
23
20
  end
24
21
  return result_hash
25
22
  end
@@ -14,44 +14,51 @@ module Mobilize
14
14
  dst = Dataset.find_by_handler_and_path('gbook',path)
15
15
  if dst and dst.http_url.to_s.length>0
16
16
  book = Gbook.find_by_http_url(dst.http_url,gdrive_slot)
17
- if book
18
- return book
17
+ #doesn't count if it's deleted
18
+ if book.entry_hash[:deleted]
19
+ book = nil
19
20
  else
20
- raise "Could not find book #{path} with url #{dst.http_url}, please check dataset"
21
+ return book
21
22
  end
22
23
  end
23
- #try to find books by title
24
24
  books = Gbook.find_all_by_path(path,gdrive_slot)
25
- #sort by publish date; if entry hash retrieval fails (as it does)
26
- #assume the book was published now
27
- book = books.sort_by{|b| begin b.entry_hash[:published];rescue;Time.now.utc.strftime("%Y-%m-%dT%H:%M:%S.000Z");end;}.first
28
- if book
29
- #we know dataset will have blank url since it wasn't picked up above
30
- dst = Dataset.find_or_create_by_handler_and_path('gbook',path)
31
- api_url = book.human_url.split("&").first
32
- dst.update_attributes(:http_url=>api_url)
25
+ dst = Dataset.find_or_create_by_handler_and_path('gbook',path)
26
+ book = nil
27
+ if books.length>1 and dst.http_url.to_s.length>0
28
+ #some idiot process or malicious user created a duplicate book.
29
+ #Fix by deleting all but the one with dst entry's key
30
+ dkey = dst.http_url.split("key=").last
31
+ books.each do |b|
32
+ bkey = b.resource_id.split(":").last
33
+ if bkey == dkey
34
+ book = b
35
+ dst.update_attributes(:http_url=>book.human_url)
36
+ else
37
+ #delete the invalid book
38
+ b.delete
39
+ ("Deleted duplicate book #{path}").oputs
40
+ end
41
+ end
42
+ else
43
+ #If it's a new dst or if there are multiple books
44
+ #take the first
45
+ book = books.first
46
+ dst.update_attributes(:http_url=>book.human_url) if book
33
47
  end
34
48
  return book
35
49
  end
36
-
37
50
  def Gbook.find_or_create_by_path(path,gdrive_slot)
38
51
  book = Gbook.find_by_path(path,gdrive_slot)
52
+ dst = Dataset.find_or_create_by_handler_and_path('gbook',path)
39
53
  if book.nil?
40
54
  #always use owner email to make sure all books are owned by owner account
41
55
  book = Gdrive.root(Gdrive.owner_email).create_spreadsheet(path)
42
56
  ("Created book #{path} at #{Time.now.utc.to_s}; Access at #{book.human_url}").oputs
43
- #check to make sure the dataset has a blank url; if not, error out
44
- dst = Dataset.find_or_create_by_handler_and_path('gbook',path)
45
- if dst.http_url.to_s.length>0
46
- #add acls to book regardless
47
- book.add_admin_acl
48
- raise "Book #{path} is already assigned to #{dst.http_url}; please update the record with #{book.human_url}"
49
- else
50
- api_url = book.human_url.split("&").first
51
- dst.update_attributes(:http_url=>api_url)
52
- book.add_admin_acl
53
- end
54
57
  end
58
+ #always make sure book dataset http URL is up to date
59
+ #and that book has admin acl
60
+ dst.update_attributes(:http_url=>book.human_url)
61
+ book.add_admin_acl
55
62
  return book
56
63
  end
57
64
  end
@@ -1,6 +1,6 @@
1
1
  module Mobilize
2
2
  module Gfile
3
- def Gfile.path_to_dst(path,stage_path,gdrive_slot)
3
+ def Gfile.path_to_dst(path,stage_path)
4
4
  #don't need the ://
5
5
  path = path.split("://").last if path.index("://")
6
6
  if Gfile.find_by_path(path)
@@ -38,8 +38,7 @@ module Mobilize
38
38
  end
39
39
  #update http url for file
40
40
  dst = Dataset.find_by_handler_and_path("gfile",dst_path)
41
- api_url = file.human_url.split("&").first
42
- dst.update_attributes(:http_url=>api_url)
41
+ dst.update_attributes(:http_url=>file.human_url)
43
42
  true
44
43
  end
45
44
 
@@ -87,8 +86,7 @@ module Mobilize
87
86
  #always make sure dataset http URL is up to date
88
87
  #and that it has admin acl
89
88
  if file
90
- api_url = file.human_url.split("&").first
91
- dst.update_attributes(:http_url=>api_url)
89
+ dst.update_attributes(:http_url=>file.human_url)
92
90
  file.add_admin_acl
93
91
  end
94
92
  return file
@@ -1,38 +1,43 @@
1
- require 'tempfile'
2
1
  module Mobilize
3
2
  module Gridfs
4
3
  def Gridfs.config
5
4
  Base.config('gridfs')
6
5
  end
7
6
 
8
- def Gridfs.read_by_dataset_path(dst_path,*args)
9
- curr_file = Mongoid::GridFs::Fs::File.where(:filename=>dst_path).first
10
- zs = curr_file.data if curr_file
11
- return ::Zlib::Inflate.inflate(zs) if zs.to_s.length>0
7
+ def Gridfs.grid
8
+ session = ::Mongoid.configure.sessions['default']
9
+ database_name = session['database']
10
+ host,port = session['hosts'].first.split(":")
11
+ return ::Mongo::GridFileSystem.new(::Mongo::Connection.new(host,port).db(database_name))
12
12
  end
13
13
 
14
- def Gridfs.write_by_dataset_path(dst_path,string,*args)
14
+ def Gridfs.read_by_dataset_path(dst_path,user_name,*args)
15
+ begin
16
+ zs=Gridfs.grid.open(dst_path,'r').read
17
+ return ::Zlib::Inflate.inflate(zs)
18
+ rescue
19
+ return nil
20
+ end
21
+ end
22
+
23
+ def Gridfs.write_by_dataset_path(dst_path,string,user_name,*args)
15
24
  zs = ::Zlib::Deflate.deflate(string)
16
25
  raise "compressed string too large for Gridfs write" if zs.length > Gridfs.config['max_compressed_write_size']
17
- #find and delete existing file
18
- curr_file = Mongoid::GridFs::Fs::File.where(:filename=>dst_path).first
19
- curr_zs = curr_file.data if curr_file
20
- #overwrite when there is a change
26
+ curr_zs = Gridfs.read_by_dataset_path(dst_path,user_name).to_s
27
+ #write a new version when there is a change
21
28
  if curr_zs != zs
22
- Mongoid::GridFs.delete(curr_file.id) if curr_file
23
- #create temp file w zstring
24
- temp_file = ::Tempfile.new("#{string}#{Time.now.to_f}".to_md5)
25
- temp_file.print(zs)
26
- temp_file.close
27
- #put data in file
28
- Mongoid::GridFs.put(temp_file.path,:filename=>dst_path)
29
+ Gridfs.grid.open(dst_path,'w',:versions => Gridfs.config['max_versions']){|f| f.write(zs)}
29
30
  end
30
31
  return true
31
32
  end
32
33
 
33
34
  def Gridfs.delete(dst_path)
34
- curr_file = Mongoid::GridFs::Fs::File.where(:filename=>dst_path).first
35
- curr_file.delete
35
+ begin
36
+ Gridfs.grid.delete(dst_path)
37
+ return true
38
+ rescue
39
+ return nil
40
+ end
36
41
  end
37
42
  end
38
43
  end
@@ -10,10 +10,12 @@ module Mobilize
10
10
  end
11
11
 
12
12
  # converts a source path or target path to a dst in the context of handler and stage
13
- def Gsheet.path_to_dst(path,stage_path,gdrive_slot)
13
+ def Gsheet.path_to_dst(path,stage_path)
14
14
  s = Stage.where(:path=>stage_path).first
15
15
  params = s.params
16
16
  target_path = params['target']
17
+ #take random slot if one is not available
18
+ gdrive_slot = Gdrive.slot_worker_by_path(stage_path) || Gdrive.worker_emails.sort_by{rand}.first
17
19
  #if this is the target, it doesn't have to exist already
18
20
  is_target = true if path == target_path
19
21
  #don't need the ://
@@ -44,7 +46,9 @@ module Mobilize
44
46
 
45
47
  def Gsheet.read_by_dataset_path(dst_path,user_name,*args)
46
48
  #expects gdrive slot as first arg, otherwise chooses random
47
- gdrive_slot = args.to_a.first
49
+ gdrive_slot = args
50
+ worker_emails = Gdrive.worker_emails.sort_by{rand}
51
+ gdrive_slot = worker_emails.first unless worker_emails.include?(gdrive_slot)
48
52
  sheet = Gsheet.find_by_path(dst_path,gdrive_slot)
49
53
  sheet.read(user_name) if sheet
50
54
  end
@@ -52,6 +56,8 @@ module Mobilize
52
56
  def Gsheet.write_by_dataset_path(dst_path,tsv,user_name,*args)
53
57
  #expects gdrive slot as first arg, otherwise chooses random
54
58
  gdrive_slot,crop = args
59
+ worker_emails = Gdrive.worker_emails.sort_by{rand}
60
+ gdrive_slot = worker_emails.first unless worker_emails.include?(gdrive_slot)
55
61
  crop ||= true
56
62
  Gsheet.write_target(dst_path,tsv,user_name,gdrive_slot,crop)
57
63
  end
@@ -81,16 +87,15 @@ module Mobilize
81
87
 
82
88
  def Gsheet.write_temp(target_path,gdrive_slot,tsv)
83
89
  #find and delete temp sheet, if any
84
- temp_book_title = target_path.gridsafe
85
- #create book and sheet
86
- temp_book = Gdrive.root(gdrive_slot).create_spreadsheet(temp_book_title)
87
- rows, cols = tsv.split("\n").ie{|t| [t.length,t.first.split("\t").length]}
88
- temp_sheet = temp_book.add_worksheet("temp",rows,cols)
90
+ temp_path = [target_path.gridsafe,"temp"].join("/")
91
+ temp_sheet = Gsheet.find_by_path(temp_path,gdrive_slot)
92
+ temp_sheet.delete if temp_sheet
93
+ #write data to temp sheet
94
+ temp_sheet = Gsheet.find_or_create_by_path(temp_path,gdrive_slot)
89
95
  #this step has a tendency to fail; if it does,
90
96
  #don't fail the stage, mark it as false
91
97
  begin
92
- gdrive_user = gdrive_slot.split("@").first
93
- temp_sheet.write(tsv,gdrive_user)
98
+ temp_sheet.write(tsv,Gdrive.owner_name)
94
99
  rescue
95
100
  return nil
96
101
  end
@@ -109,7 +114,7 @@ module Mobilize
109
114
  #only give the user edit permissions if they're the ones
110
115
  #creating it
111
116
  target_sheet = Gsheet.find_or_create_by_path(target_path,gdrive_slot)
112
- target_sheet.spreadsheet.update_acl(u.email,"writer") unless target_sheet.spreadsheet.acl_entry(u.email).ie{|e| e and e.role=="owner"}
117
+ target_sheet.spreadsheet.update_acl(user_email,"writer") unless target_sheet.spreadsheet.acl_entry(u.email).ie{|e| e and e.role=="owner"}
113
118
  target_sheet.delete_sheet1
114
119
  end
115
120
  #pass it crop param to determine whether to shrink target sheet to fit data
@@ -129,24 +134,14 @@ module Mobilize
129
134
  crop = s.params['crop'] || true
130
135
  begin
131
136
  #get tsv to write from stage
132
- source = s.sources(gdrive_slot).first
137
+ source = s.sources.first
133
138
  raise "Need source for gsheet write" unless source
134
139
  tsv = source.read(u.name,gdrive_slot)
135
- raise "No data source found for #{source.url}" unless tsv
136
- tsv_row_count = tsv.to_s.split("\n").length
137
- tsv_col_count = tsv.to_s.split("\n").first.to_s.split("\t").length
138
- tsv_cell_count = tsv_row_count * tsv_col_count
139
- stdout = if tsv_row_count == 0
140
- #soft error; no data to write. Stage will complete.
141
- "Write skipped for #{s.target.url}"
142
- elsif tsv_cell_count > Gsheet.max_cells
143
- raise "Too many datapoints; you have #{tsv_cell_count.to_s}, max is #{Gsheet.max_cells.to_s}"
144
- else
145
- Dataset.write_by_url(s.target.url,tsv,u.name,gdrive_slot,crop)
146
- #update status
147
- "Write successful for #{s.target.url}"
148
- end
140
+ raise "No data found in #{source.url}" unless tsv
141
+ Dataset.write_by_url(s.target.url,tsv,u.name,gdrive_slot,crop)
149
142
  Gdrive.unslot_worker_by_path(stage_path)
143
+ #update status
144
+ stdout = "Write successful for #{s.target.url}"
150
145
  stderr = nil
151
146
  s.update_status(stdout)
152
147
  signal = 0
@@ -25,7 +25,7 @@ module Mobilize
25
25
  return idle_workers if state == 'idle'
26
26
  stale_workers = workers.select{|w| Time.parse(w.started) < Jobtracker.deployed_at}
27
27
  return stale_workers if state == 'stale'
28
- timeout_workers = workers.select{|w| w.job['payload'] and w.job['payload']['class']!='Jobtracker' and w.job['run_at'] < (Time.now.utc - Jobtracker.max_run_time)}
28
+ timeout_workers = workers.select{|w| w.job['payload'] and w.job['payload']['class']!='Jobtracker' and w.job['runat'] < (Time.now.utc - Jobtracker.max_run_time)}
29
29
  return timeout_workers if state == 'timeout'
30
30
  raise "invalid state #{state}"
31
31
  end
@@ -109,28 +109,16 @@ module Mobilize
109
109
  Resque.failures.each_with_index do |f,f_i|
110
110
  #skip if already notified
111
111
  next if f['notified']
112
- #try to send message to stage owner, where appropriate
113
112
  stage_path = f['payload']['args'].first
114
- email = begin
115
- s = Stage.where(:path=>stage_path).first
116
- if s.params['notify'].to_s=="false"
117
- next
118
- elsif s.params['notify'].index("@")
119
- s.params['notify']
120
- else
121
- s.job.runner.user.email
122
- end
123
- rescue
124
- #jobs without stages are sent to first admin
125
- Jobtracker.admin_emails.first
126
- end
113
+ s = Stage.where(:path=>stage_path).first
114
+ email = s.job.runner.user.email
127
115
  exc_to_s = f['error']
128
116
  if fjobs[email].nil?
129
117
  fjobs[email] = {stage_path => {exc_to_s => 1}}
130
118
  elsif fjobs[email][stage_path].nil?
131
119
  fjobs[email][stage_path] = {exc_to_s => 1}
132
120
  elsif fjobs[email][stage_path][exc_to_s].nil?
133
- fjobs[email][stage_path][exc_to_s] = 1
121
+ fjobs[email][stage_path][exc_to_s] = 1
134
122
  else
135
123
  fjobs[email][stage_path][exc_to_s] += 1
136
124
  end
@@ -48,8 +48,6 @@ module Mobilize
48
48
  end
49
49
 
50
50
  def Jobtracker.update_status(msg)
51
- #this is to keep jobtracker from resisting stop commands
52
- return false if Jobtracker.status=="stopping"
53
51
  #Jobtracker has no persistent database state
54
52
  Resque.set_worker_args_by_path("jobtracker",{'status'=>msg})
55
53
  return true
@@ -124,7 +122,7 @@ module Mobilize
124
122
  sleep 5
125
123
  i=0
126
124
  while Jobtracker.status=='stopping'
127
- puts "#{Jobtracker.to_s} still on queue, waiting"
125
+ Jobtracker.update_status("#{Jobtracker.to_s} still on queue, waiting")
128
126
  sleep 5
129
127
  i+=1
130
128
  end
@@ -147,8 +145,8 @@ module Mobilize
147
145
  def Jobtracker.max_run_time_workers
148
146
  #return workers who have been cranking away for 6+ hours
149
147
  workers = Jobtracker.workers('working').select do |w|
150
- w.job['run_at'].to_s.length>0 and
151
- (Time.now.utc - Time.parse(w.job['run_at'])) > Jobtracker.max_run_time
148
+ w.job['runat'].to_s.length>0 and
149
+ (Time.now.utc - Time.parse(w.job['runat'])) > Jobtracker.max_run_time
152
150
  end
153
151
  return workers
154
152
  end
@@ -187,18 +185,13 @@ module Mobilize
187
185
  if lws.length>0
188
186
  n = {}
189
187
  n['subject'] = "#{lws.length.to_s} max run time jobs"
190
- n['body'] = lws.map{|w| %{spec:#{w['spec']} stg:#{w['stg']} run_at:#{w['run_at'].to_s}}}.join("\n\n")
188
+ n['body'] = lws.map{|w| %{spec:#{w['spec']} stg:#{w['stg']} runat:#{w['runat'].to_s}}}.join("\n\n")
191
189
  n['to'] = Jobtracker.admin_emails.join(",")
192
190
  notifs << n
193
191
  end
194
192
  #deliver each email generated
195
193
  notifs.each do |notif|
196
- begin
197
- Email.write(notif).deliver
198
- rescue
199
- #log email on failure
200
- Jobtracker.update_status("Failed to deliver #{notif.to_s}")
201
- end
194
+ Email.write(notif).deliver
202
195
  end
203
196
  #update notification time so JT knows to wait a while
204
197
  Jobtracker.last_notification = Time.now.utc.to_s
@@ -293,7 +286,6 @@ module Mobilize
293
286
  # delete any old runner from previous test runs
294
287
  gdrive_slot = Gdrive.owner_email
295
288
  u.runner.gsheet(gdrive_slot).spreadsheet.delete
296
- Dataset.find_by_handler_and_path('gbook',u.runner.title).delete
297
289
  Jobtracker.update_status("enqueue jobtracker, wait 45s")
298
290
  Mobilize::Jobtracker.start
299
291
  sleep 45
@@ -2,54 +2,66 @@ module Mobilize
2
2
  class Job
3
3
  include Mongoid::Document
4
4
  include Mongoid::Timestamps
5
- include Mobilize::JobHelper
6
5
  field :path, type: String
7
6
  field :active, type: Boolean
8
7
  field :trigger, type: String
9
8
 
10
9
  index({ path: 1})
11
10
 
11
+ def name
12
+ j = self
13
+ j.path.split("/").last
14
+ end
15
+
16
+ def stages
17
+ j = self
18
+ #starts with the job path, followed by a slash
19
+ Stage.where(:path=>/^#{j.path.escape_regex}\//).to_a.sort_by{|s| s.path}
20
+ end
21
+
12
22
  def Job.find_or_create_by_path(path)
13
23
  j = Job.where(:path=>path).first
14
24
  j = Job.create(:path=>path) unless j
15
25
  return j
16
26
  end
17
27
 
18
- #takes a hash of job parameters (name, active, trigger, stages)
19
- #and creates/updates a job with it
20
- def Job.update_by_user_name_and_hash(user_name,hash)
21
- u = User.where(name: user_name).first
22
- r = u.runner
23
- j = Job.find_or_create_by_path("#{r.path}/#{hash['name']}")
24
- #update top line params
25
- j.update_attributes(:active => hash['active'],
26
- :trigger => hash['trigger'])
27
- (1..5).to_a.each do |s_idx|
28
- stage_string = hash["stage#{s_idx.to_s}"]
29
- s = Stage.find_by_path("#{j.path}/stage#{s_idx.to_s}")
30
- if stage_string.to_s.length==0
31
- #delete this stage and all stages after
32
- if s
33
- j = s.job
34
- j.stages[(s.idx-1)..-1].each{|ps| ps.delete}
35
- #just in case
36
- s.delete
37
- end
38
- break
39
- elsif s.nil?
40
- #create this stage
41
- s = Stage.find_or_create_by_path("#{j.path}/stage#{s_idx.to_s}")
42
- end
43
- #parse command string, update stage with it
44
- s_handler, call, param_string = [""*3]
45
- stage_string.split(" ").ie do |spls|
46
- s_handler = spls.first.split(".").first
47
- call = spls.first.split(".").last
48
- param_string = spls[1..-1].join(" ").strip
49
- end
50
- s.update_attributes(:call=>call, :handler=>s_handler, :param_string=>param_string)
51
- end
52
- return j.reload
28
+ def status
29
+ #last stage status
30
+ j = self
31
+ j.active_stage.status if j.active_stage
32
+ end
33
+
34
+ def active_stage
35
+ j = self
36
+ #latest started at or first
37
+ j.stages.select{|s| s.started_at}.sort_by{|s| s.started_at}.last || j.stages.first
38
+ end
39
+
40
+ def completed_at
41
+ j = self
42
+ j.stages.last.completed_at if j.stages.last
43
+ end
44
+
45
+ def failed_at
46
+ j = self
47
+ j.active_stage.failed_at if j.active_stage
48
+ end
49
+
50
+ def status_at
51
+ j = self
52
+ j.active_stage.status_at if j.active_stage
53
+ end
54
+
55
+ #convenience methods
56
+ def runner
57
+ j = self
58
+ runner_path = j.path.split("/")[0..-2].join("/")
59
+ return Runner.where(:path=>runner_path).first
60
+ end
61
+
62
+ def is_working?
63
+ j = self
64
+ j.stages.select{|s| s.is_working?}.compact.length>0
53
65
  end
54
66
 
55
67
  def is_due?