vayacondios-server 0.1.1 → 0.1.2
This diff covers publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
- data/Gemfile +3 -2
- data/lib/vayacondios/client/notifier.rb +3 -1
- data/lib/vayacondios/version.rb +1 -1
- data/scripts/hadoop_monitor/configurable.rb +2 -17
- data/scripts/hadoop_monitor/hadoop_attempt_scraper.rb +42 -0
- data/scripts/hadoop_monitor/hadoop_client.rb +79 -56
- data/scripts/hadoop_monitor/hadoop_monitor.rb +47 -37
- metadata +69 -23
data/Gemfile
CHANGED
@@ -4,8 +4,9 @@ gemspec name: 'vayacondios-server'
 gemspec name: 'vayacondios-client'
 
 group :hadoop_monitor do
-  gem 'mongo'
-  gem 'bson_ext'
+  gem 'mongo', '~> 1.8'
   gem 'gorillib', require: 'gorillib/hash/slice'
   gem 'json'
+  gem 'nokogiri', '~> 1.5'
+  gem 'nibbler', '~> 1.3'
 end
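The hadoop_monitor group swaps the unversioned mongo/bson_ext pair for a mongo driver pinned to the 1.8 series, and adds nokogiri and nibbler for the new JobTracker attempt scraper (see hadoop_attempt_scraper.rb below). To illustrate what the pessimistic (~>) pin permits, a minimal sketch against the stock RubyGems API, not code from this package:

    require 'rubygems'

    req = Gem::Requirement.new('~> 1.8')          # pessimistic pin: >= 1.8, < 2.0
    req.satisfied_by?(Gem::Version.new('1.8.6'))  # => true
    req.satisfied_by?(Gem::Version.new('2.0.0'))  # => false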
data/lib/vayacondios/client/notifier.rb
CHANGED
@@ -21,6 +21,8 @@ class Vayacondios
   end
 
   class NullNotifier < Notifier
+    def initialize(*args) ; end
+
     def notify topic, cargo={}
     end
   end
@@ -56,7 +58,7 @@ class Vayacondios
 
  class NotifierFactory
    def self.receive(attrs = {})
-      type = attrs
+      type = attrs[:type]
      case type
      when 'http' then HttpNotifier.new(attrs)
      when 'log'  then LogNotifier.new(attrs)
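Two small client fixes here: NullNotifier gains a constructor that accepts and ignores any arguments, so it can stand in for a real notifier anywhere, and NotifierFactory.receive now dispatches on attrs[:type] rather than the whole attrs hash, which could never match a 'when' string. A hypothetical invocation (the option keys each notifier accepts are assumptions, not shown in this diff):

    notifier = Vayacondios::NotifierFactory.receive(type: 'http', host: 'localhost', port: 8000)
    notifier.notify 'hadoop.job_events', job_id: 'job_201212170000_0001'

    # The null notifier silently swallows both construction and notification.
    null = Vayacondios::NullNotifier.new(anything: 'goes')
    null.notify 'ignored.topic'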
data/lib/vayacondios/version.rb
CHANGED
@@ -1,3 +1,3 @@
 class Vayacondios
-  VERSION = '0.1.1'
+  VERSION = '0.1.2'
 end
data/scripts/hadoop_monitor/configurable.rb
CHANGED
@@ -38,30 +38,15 @@ module Vayacondios
     @settings.define(:log_level,
       default: "info",
       description: "Log level. See standard Logger class")
-    @settings.define(:
+    @settings.define(:mongo_db,
       default: 'job_info',
       description: "Mongo database to dump hadoop job information into")
-    @settings.define(:mongo_job_logs_collection,
-      default: 'job_logs',
-      description: "Mongo collection to dump job logs into.")
-    @settings.define(:mongo_job_events_collection,
-      default: 'job_events',
-      description: "Mongo collection containing jobs events.")
-    @settings.define(:mongo_machine_stats_collection,
-      default: 'machine_stats',
-      description: "Mongo collection containing machine stats.")
     @settings.define(:mongo_ip,
       default: nil,
       description: "IP address of Hadoop monitor node")
-    @settings.define(:
-      default: 10 * (1 << 20),
-      description: ("Size (in bytes) of Mongo jobs log collection"))
-    @settings.define(:job_events_size,
+    @settings.define(:mongo_collection_size,
       default: 10 * (1 << 20),
       description: ("Size (in bytes) of Mongo job events collection"))
-    @settings.define(:machine_stats_size,
-      default: 100 * (1 << 20),
-      description: ("Size (in bytes) of machine stats collection"))
 
     @settings.resolve!
 
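The monitor's per-collection knobs collapse into a single mongo_db name plus one mongo_collection_size cap shared by the capped collections. For readers unfamiliar with Configliere, a minimal standalone sketch of the define/resolve! pattern used above (on a fresh Configliere::Param here, rather than the monitor's @settings):

    require 'configliere'

    settings = Configliere::Param.new
    settings.define :mongo_db,              default: 'job_info',     description: "Mongo database to dump hadoop job information into"
    settings.define :mongo_collection_size, default: 10 * (1 << 20), description: "Size (in bytes) of capped Mongo collections"
    settings.resolve!   # merge defaults, config files, environment, and command line

    settings.mongo_db               # => "job_info"
    settings.mongo_collection_size  # => 10485760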
data/scripts/hadoop_monitor/hadoop_attempt_scraper.rb
ADDED
@@ -0,0 +1,42 @@
+require 'open-uri'
+require 'nibbler'
+require 'socket'
+
+class HadoopAttemptScraper < Nibbler
+  attr_accessor :task_id
+
+  def self.scrape_task(task_id)
+    task_id = task_id.to_s
+
+    url = "http://#{Socket.gethostname}:50030/taskdetails.jsp?tipid=#{task_id}"
+    scrape = parse(open(url))
+    scrape.task_id = task_id
+
+    scrape
+  end
+
+  elements 'table.jobtasks tbody > tr' => :attempts do
+    element 'td:nth-child(1)' => 'attempt_id'
+    element 'td:nth-child(2) a/@href' => 'machine'
+    element 'td:nth-child(3)' => 'status'
+    element 'td:nth-child(4)' => 'progress'
+    element 'td:nth-child(5)' => 'start_time'
+    element 'td:nth-child(6)' => 'finish_time'
+    element 'td:nth-child(7)' => 'errors'
+  end
+
+  def to_attempts
+    attempts.map do |attempt|
+      {
+        _id: attempt.attempt_id.to_s,
+        task_id: task_id,
+        host: attempt.machine.to_s.gsub(/^http:\/\//, '').gsub(/:[0-9]+$/, ''),
+        status: attempt.status,
+        progress: attempt.progress.to_f / 100.0,
+        start_time: Time.parse(attempt.start_time),
+        finish_time: attempt.finish_time.length > 0 ? Time.parse(attempt.finish_time) : nil,
+        errors: attempt.errors
+      }
+    end
+  end
+end
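The new scraper drives Nibbler's selector DSL over the stock JobTracker web UI (port 50030) to recover per-attempt detail alongside what the JobClient API reports. Note that to_attempts calls Time.parse, so callers should have required 'time'. A hypothetical run against a local JobTracker (the task id is made up):

    require 'time'
    require_relative 'hadoop_attempt_scraper'

    scrape = HadoopAttemptScraper.scrape_task('task_201212170000_0001_m_000000')
    scrape.to_attempts.each do |attempt|
      puts "#{attempt[:_id]} on #{attempt[:host]}: #{attempt[:status]} (#{(attempt[:progress] * 100).round}%)"
    end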
data/scripts/hadoop_monitor/hadoop_client.rb
CHANGED
@@ -1,5 +1,7 @@
 require_relative 'configurable'
 require_relative 'hadoopable'
+require_relative 'hadoop_attempt_scraper'
+
 require 'json'
 require 'optparse'
 require 'ostruct'
@@ -22,7 +24,7 @@ module Vayacondios
       logger.info "Connecting to job tracker."
       @job_client = JobClient.new JobConf.new(get_hadoop_conf)
     end
-
+
     #
     # (Equality doesn't work for jobs, so - will not work as intended
     # on arrays of jobs.)
@@ -34,7 +36,7 @@ module Vayacondios
     #
     # Returns the jobs with the specified state. States are specified
     # by constants in this class.
-    #
+    #
     def jobs_with_state state
       jobs_by_state[state] || []
     end
@@ -46,19 +48,19 @@ module Vayacondios
       host_port = job.get_tracking_url[/^(http:\/\/)?[^\/]*/]
       job_id = job.get_id.to_s
       conf_uri = "#{host_port}/logs/#{job_id}_conf.xml"
-
-
+
+      parse_properties(open conf_uri)
     end
 
     #
     # Returns the stats for the current job as a hash.
     #
     def job_stats job, finish_time
-      parse_job job.get_id
+      parse_job job.get_id, finish_time
     end
 
     private
-
+
     #
     # Returns a hash JobStatus::<SOME_STATE> => <array of jobs>
     #
@@ -80,27 +82,41 @@ module Vayacondios
     # object that represents it.
     #
     def parse_job job_id, finish_time
-      job
-      job_status
+      job = @job_client.get_job job_id
+      job_status = @job_client.get_all_jobs.select{|j| j.get_job_id.to_s == job_id.to_s}.first
       finished_status = [:FAILED, :KILLED, :COMPLETE]
       failed_status = [:FAILED]
 
+      start_time = Time.at(job_status.get_start_time / 1000)
+      reduce_progress = job.reduce_progress
+      map_progress = job.map_progress
+      run_duration = (finish_time || Time.now) - start_time
+
+      map_eta = map_progress && map_progress > 0.0 ? (start_time + (run_duration / map_progress)) : nil
+      reduce_eta = reduce_progress && reduce_progress > 0.0 ? (start_time + (run_duration / reduce_progress)) : nil
+
       job_data = {
-
+
         _id: job_id.to_s,
+        name: job.get_job_name.to_s,
 
         # not sure what is what. I'm guessing
         # JobStatus.getStartTime corresponds to the
         # launch time in the logs, but I'm going to
         # go ahead and use it twice here.
 
-        launch_time:
-        submit_time:
-
+        launch_time: start_time,
+        submit_time: start_time,
         finish_time: finish_time,
 
+        run_duration: run_duration,
+
+        map_eta: map_eta,
+        reduce_eta: reduce_eta,
+        eta: reduce_eta,
+
         job_status: case job_status.get_run_state
-                    when JobStatus::FAILED then :FAILED
+                    when JobStatus::FAILED then :FAILED
                     when JobStatus::KILLED then :KILLED
                     when JobStatus::PREP then :PREP
                     when JobStatus::RUNNING then :RUNNING
@@ -117,39 +133,45 @@ module Vayacondios
 
       }
 
-
-
-
-
-
-
-
-
-
-      setup_progress: job.setup_progress,
-
+      job_event = {
+        t: Time.now,
+        d: {
+          job_id: job.job_id,
+          cleanup_progress: job.cleanup_progress,
+          map_progress: job.map_progress,
+          reduce_progress: job.reduce_progress,
+          setup_progress: job.setup_progress,
+        }
       }
 
-
-
-
-
-
-
-
-
-
-
-
-
-
+      setup_task_data = @job_client.get_setup_task_reports job_id
+      map_task_data = @job_client.get_map_task_reports job_id
+      reduce_task_data = @job_client.get_reduce_task_reports job_id
+      cleanup_task_data = @job_client.get_cleanup_task_reports job_id
+
+      setup_reports = setup_task_data.map{|task| parse_task task, "SETUP", job_id }
+      setup_event_reports = setup_task_data.map{|task| parse_task_progress task, "SETUP" }
+
+      map_reports = map_task_data.map{|task| parse_task task, "MAP", job_id }
+      map_event_reports = map_task_data.map{|task| parse_task_progress task, "MAP" }
+
+      reduce_reports = reduce_task_data.map{|task| parse_task task, "REDUCE", job_id }
+      reduce_event_reports = reduce_task_data.map{|task| parse_task_progress task, "REDUCE" }
+
+      cleanup_reports = cleanup_task_data.map{|task| parse_task task, "CLEANUP", job_id }
+      cleanup_event_reports = cleanup_task_data.map{|task| parse_task_progress task, "CLEANUP" }
+
+      tasks = setup_reports + map_reports + reduce_reports + cleanup_reports
+      task_events = setup_event_reports + map_event_reports + reduce_event_reports + cleanup_event_reports
+
+      attempt_reports = tasks.map{|task| HadoopAttemptScraper.scrape_task(task[:_id]).to_attempts }.flatten
 
-      def recordize_properties properties, job_id
       {
-
-
-
-
+        job: job_data,
+        job_event: job_event,
+        tasks: tasks,
+        task_events: task_events,
+        attempts: attempt_reports
       }
     end
 
@@ -174,25 +196,26 @@ module Vayacondios
     #
     def parse_task task_report, task_type, parent_job_id
       {
-        _id:
-
-        task_type:
-        task_status:
-        start_time:
-        finish_time:
-        counters:
-
-
-        running_attempts: task_report.get_running_task_attempts.map(&:to_s),
+        _id: task_report.get_task_id.to_s,
+        job_id: parent_job_id,
+        task_type: task_type,
+        task_status: task_report.get_current_status.to_s,
+        start_time: Time.at(task_report.get_start_time / 1000),
+        finish_time: task_report.get_finish_time > 0 ? Time.at(task_report.get_finish_time / 1000) : nil,
+        counters: parse_counters(task_report.get_counters),
+        diagnostics: task_report.get_diagnostics.map(&:to_s),
+        successful_attempt_id: task_report.get_successful_task_attempt.to_s
       }
     end
 
     def parse_task_progress task_report, task_type
       {
-
-
-
-
+        t: Time.now,
+        d: {
+          job_id: task_report.get_task_id.to_s,
+          progress: task_report.get_progress,
+          running_attempt_ids: task_report.get_running_task_attempts.map(&:to_s)
+        }
       }
     end
 
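parse_job now assembles one composite record per job: the job document itself, a point-in-time job event, task documents, task progress events, and the scraped attempts. An illustrative sketch of the returned shape (keys taken from the diff above; the values are invented):

    stats = {
      job:         { _id: 'job_201212170000_0001', name: 'wordcount', finish_time: nil },
      job_event:   { t: Time.now, d: { job_id: 'job_201212170000_0001', map_progress: 0.5 } },
      tasks:       [{ _id: 'task_201212170000_0001_m_000000', task_type: 'MAP' }],
      task_events: [{ t: Time.now, d: { progress: 0.5 } }],
      attempts:    [{ _id: 'attempt_201212170000_0001_m_000000_0', host: 'worker1' }]
    }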
data/scripts/hadoop_monitor/hadoop_monitor.rb
CHANGED
@@ -15,46 +15,47 @@ module Vayacondios
   class HadoopMonitor
     def initialize
       init_settings
-
+
       @hadoop = HadoopClient.new
-
+
       @monitored_jobs = []
 
       logger.debug "Creating mongo collections."
       @conn = Mongo::Connection.new settings.mongo_ip
-      @db = @conn[settings.
-      @job_logs = @db.create_collection(settings.mongo_job_logs_collection)
+      @db = @conn[settings.mongo_db]
 
-
-
-
-
-        :size => settings.job_events_size)
+      capped_collection_opts = {
+        :capped => true,
+        :size => settings.mongo_collection_size
+      }
 
-      @
+      @collections = {
+        jobs: @db.create_collection('jobs'),
+        tasks: @db.create_collection('job_tasks'),
+        attempts: @db.create_collection('job_task_attempts'),
+
+        job_events: @db.create_collection('job_events', capped_collection_opts),
+        task_events: @db.create_collection('job_tasks_events', capped_collection_opts),
+      }
     end
 
     def run
       loop do
-
+
         logger.debug "In main event loop."
 
-
-
+        running_jobs = @hadoop.jobs_with_state HadoopClient::RUNNING
+        started_jobs = @hadoop.subtract(running_jobs, @monitored_jobs)
+        finished_jobs = @hadoop.subtract(@monitored_jobs, running_jobs)
 
-
+        finished_jobs.each do |job|
           logger.debug "#{job.get_id.to_s} is complete."
           update_job_stats job, Time.now
         end
-        @hadoop.subtract(cur_running_jobs, @monitored_jobs).each do |job|
-          logger.debug "#{job.get_id.to_s} started."
-          update_job_properties job
-        end
 
-
+        running_jobs.each{|job| update_job_stats job, nil, @hadoop.subtract([job], started_jobs).empty? }
 
-        @monitored_jobs =
-        update_cluster_state cur_cluster_state
+        @monitored_jobs = running_jobs
 
         sleep settings.sleep_seconds
 
@@ -65,27 +66,36 @@ module Vayacondios
 
     include Configurable
 
-    def
-
-      @cluster_state = new_state
-      logger.info "Cluster state changed to #{@cluster_state}"
-      @job_events.insert(EVENT => @cluster_state, TIME => Time.now.to_i)
-    end
+    def update_job_stats job, finish_time = nil, include_properties = false
+      stats = @hadoop.job_stats(job, finish_time)
 
-
-
-
-
-
+      if include_properties
+        stats[:job][:properties] = @hadoop.job_properties job
+      end
+
+      logger.debug "upserting job #{JSON.generate stats[:job]}"
+      @collections[:jobs].update({_id: stats[:job][:_id]}, stats[:job], upsert: true)
+
+      logger.debug "upserting job_event #{JSON.generate stats[:job_event]}"
+      @collections[:job_events].insert(stats[:job_event])
+
+      logger.debug "upserting tasks #{JSON.generate stats[:tasks]}"
+      stats[:tasks].each do |task|
+        @collections[:tasks].update({_id: task[:_id]}, task, upsert: true)
+      end
+
+      logger.debug "upserting task_events #{JSON.generate stats[:task_events]}"
+      stats[:task_events].each do |task_event|
+        @collections[:task_events].insert(task_event)
+      end
 
-
-
-
-      @job_logs.save(job_stat, upsert: true, safe: true)
+      logger.debug "upserting attempts #{JSON.generate stats[:attempts]}"
+      stats[:attempts].each do |attempt|
+        @collections[:attempts].update({_id: attempt[:_id]}, attempt, upsert: true)
       end
     end
 
   end
-end
+end
 
 Vayacondios::HadoopMonitor.new.run
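The monitor now writes five collections: plain ones for jobs, tasks, and attempts (upserted by _id) and capped ones for the job and task event streams. A minimal sketch of the capped-collection behavior with the 1.x mongo driver pinned in the Gemfile (connection details assumed):

    require 'mongo'

    conn = Mongo::Connection.new('localhost')
    db   = conn['job_info']

    # A capped collection preserves insertion order and silently recycles the oldest
    # documents once the byte cap is reached: a good fit for an unbounded event stream.
    events = db.create_collection('job_events', :capped => true, :size => 10 * (1 << 20))
    events.insert(t: Time.now, d: { job_id: 'job_201212170000_0001', map_progress: 0.5 })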
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: vayacondios-server
 version: !ruby/object:Gem::Version
-  version: 0.1.1
+  version: 0.1.2
 prerelease:
 platform: ruby
 authors:
@@ -12,11 +12,11 @@ authors:
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2012-12-
+date: 2012-12-17 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: configliere
-  requirement:
+  requirement: !ruby/object:Gem::Requirement
     none: false
     requirements:
     - - ! '>='
@@ -24,10 +24,15 @@ dependencies:
         version: 0.4.13
   type: :runtime
   prerelease: false
-  version_requirements:
+  version_requirements: !ruby/object:Gem::Requirement
+    none: false
+    requirements:
+    - - ! '>='
+      - !ruby/object:Gem::Version
+        version: 0.4.13
 - !ruby/object:Gem::Dependency
   name: gorillib
-  requirement:
+  requirement: !ruby/object:Gem::Requirement
     none: false
     requirements:
     - - ~>
@@ -35,10 +40,15 @@ dependencies:
         version: 0.4.2
   type: :runtime
   prerelease: false
-  version_requirements:
+  version_requirements: !ruby/object:Gem::Requirement
+    none: false
+    requirements:
+    - - ~>
+      - !ruby/object:Gem::Version
+        version: 0.4.2
 - !ruby/object:Gem::Dependency
   name: eventmachine
-  requirement:
+  requirement: !ruby/object:Gem::Requirement
     none: false
     requirements:
     - - ~>
@@ -46,10 +56,15 @@ dependencies:
         version: 1.0.0.beta.4
   type: :runtime
   prerelease: false
-  version_requirements:
+  version_requirements: !ruby/object:Gem::Requirement
+    none: false
+    requirements:
+    - - ~>
+      - !ruby/object:Gem::Version
+        version: 1.0.0.beta.4
 - !ruby/object:Gem::Dependency
   name: goliath
-  requirement:
+  requirement: !ruby/object:Gem::Requirement
     none: false
     requirements:
     - - ~>
@@ -57,10 +72,15 @@ dependencies:
         version: '1.0'
   type: :runtime
   prerelease: false
-  version_requirements:
+  version_requirements: !ruby/object:Gem::Requirement
+    none: false
+    requirements:
+    - - ~>
+      - !ruby/object:Gem::Version
+        version: '1.0'
 - !ruby/object:Gem::Dependency
   name: em-http-request
-  requirement:
+  requirement: !ruby/object:Gem::Requirement
     none: false
     requirements:
     - - ~>
@@ -68,10 +88,15 @@ dependencies:
         version: '1.0'
   type: :runtime
   prerelease: false
-  version_requirements:
+  version_requirements: !ruby/object:Gem::Requirement
+    none: false
+    requirements:
+    - - ~>
+      - !ruby/object:Gem::Version
+        version: '1.0'
 - !ruby/object:Gem::Dependency
   name: em-mongo
-  requirement:
+  requirement: !ruby/object:Gem::Requirement
     none: false
     requirements:
     - - ~>
@@ -79,10 +104,15 @@ dependencies:
         version: 0.4.3
   type: :runtime
   prerelease: false
-  version_requirements:
+  version_requirements: !ruby/object:Gem::Requirement
+    none: false
+    requirements:
+    - - ~>
+      - !ruby/object:Gem::Version
+        version: 0.4.3
 - !ruby/object:Gem::Dependency
   name: foreman
-  requirement:
+  requirement: !ruby/object:Gem::Requirement
     none: false
     requirements:
     - - ! '>='
@@ -90,10 +120,15 @@ dependencies:
         version: '0'
   type: :runtime
   prerelease: false
-  version_requirements:
+  version_requirements: !ruby/object:Gem::Requirement
+    none: false
+    requirements:
+    - - ! '>='
+      - !ruby/object:Gem::Version
+        version: '0'
 - !ruby/object:Gem::Dependency
   name: rake
-  requirement:
+  requirement: !ruby/object:Gem::Requirement
     none: false
     requirements:
     - - ! '>='
@@ -101,10 +136,15 @@ dependencies:
         version: '0'
   type: :development
   prerelease: false
-  version_requirements:
+  version_requirements: !ruby/object:Gem::Requirement
+    none: false
+    requirements:
+    - - ! '>='
+      - !ruby/object:Gem::Version
+        version: '0'
 - !ruby/object:Gem::Dependency
   name: mongo
-  requirement:
+  requirement: !ruby/object:Gem::Requirement
     none: false
     requirements:
     - - ! '>='
@@ -112,7 +152,12 @@ dependencies:
         version: '0'
   type: :development
   prerelease: false
-  version_requirements:
+  version_requirements: !ruby/object:Gem::Requirement
+    none: false
+    requirements:
+    - - ! '>='
+      - !ruby/object:Gem::Version
+        version: '0'
 description: Simple enough to use in a shell script, performant enough to use everywhere.
   Dios mío! Record that metric, ese!
 email:
@@ -159,6 +204,7 @@ files:
 - lib/vayacondios/server/rack/path_validation.rb
 - lib/vayacondios/version.rb
 - scripts/hadoop_monitor/configurable.rb
+- scripts/hadoop_monitor/hadoop_attempt_scraper.rb
 - scripts/hadoop_monitor/hadoop_client.rb
 - scripts/hadoop_monitor/hadoop_monitor.rb
 - scripts/hadoop_monitor/hadoopable.rb
@@ -193,7 +239,7 @@ required_ruby_version: !ruby/object:Gem::Requirement
       version: '0'
       segments:
       - 0
-      hash:
+      hash: 1142154938423914035
 required_rubygems_version: !ruby/object:Gem::Requirement
   none: false
   requirements:
@@ -202,10 +248,10 @@ required_rubygems_version: !ruby/object:Gem::Requirement
       version: '0'
       segments:
       - 0
-      hash:
+      hash: 1142154938423914035
 requirements: []
 rubyforge_project:
-rubygems_version: 1.8.
+rubygems_version: 1.8.24
 signing_key:
 specification_version: 3
 summary: Data goes in. The right thing happens