cloud-crowd 0.7.3 → 0.7.5
- checksums.yaml +4 -4
- data/cloud-crowd.gemspec +6 -2
- data/lib/cloud-crowd.rb +13 -4
- data/lib/cloud_crowd/command_line.rb +3 -3
- data/lib/cloud_crowd/dispatcher.rb +52 -0
- data/lib/cloud_crowd/models/job.rb +2 -2
- data/lib/cloud_crowd/models/node_record.rb +9 -4
- data/lib/cloud_crowd/models/work_unit.rb +1 -1
- data/lib/cloud_crowd/node.rb +40 -9
- data/lib/cloud_crowd/server.rb +14 -15
- data/lib/cloud_crowd/version.rb +4 -0
- data/lib/cloud_crowd/worker.rb +1 -1
- data/test/acceptance/test_failing_work_units.rb +3 -2
- data/test/acceptance/test_server.rb +4 -1
- data/test/acceptance/test_word_count.rb +5 -5
- data/test/test_helper.rb +16 -1
- data/test/unit/test_job.rb +1 -0
- data/test/unit/test_node_record.rb +18 -0
- metadata +4 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 82aa3ec5f4de9712060de2ba1f62207cc4c9d57c
+  data.tar.gz: 8129d294659a958f2ae1031dba8f4b507d064d03
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 45c9bf5f14369c0b9d30d8eb2e6cc3dcbf6bcf046ccff6e6262682ba94b5bb26b3d23fd4fe4c8c1e7f7a980146abad9f12d5fa3baeeace408e6f2e6ca9db0c38
+  data.tar.gz: a4c1687bf3c7ada3354c30a19eaa48d8709f1c2a9bd3b874e4d823519ee3201600d9f20607d003dc7430c188395474d0ffe52d0e36e900e8f2c1c530b96b1111
data/cloud-crowd.gemspec
CHANGED
@@ -1,7 +1,11 @@
+lib = File.expand_path('../lib', __FILE__)
+$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
+require 'cloud_crowd/version'
+
 Gem::Specification.new do |s|
   s.name = 'cloud-crowd'
-  s.version =
-  s.date =
+  s.version = CloudCrowd::VERSION
+  s.date = CloudCrowd::VERSION_RELEASED
 
   s.homepage = "http://wiki.github.com/documentcloud/cloud-crowd"
   s.summary = "Parallel Processing for the Rest of Us"
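
The gemspec now reads its version and release date from a new lib/cloud_crowd/version.rb (listed in the summary above as +4 lines, though its contents are not shown on this page). A plausible sketch of that file, assuming the 0.7.5 version and 2015-06-17 date recorded in the gem metadata at the bottom of this page:

module CloudCrowd
  VERSION          = '0.7.5'       # value inferred from the metadata section below
  VERSION_RELEASED = '2015-06-17'  # value inferred from the metadata date below
end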
data/lib/cloud-crowd.rb
CHANGED
@@ -25,6 +25,7 @@ autoload :YAML, 'yaml'
 # Common code which should really be required in every circumstance.
 require 'socket'
 require 'net/http'
+require 'cloud_crowd/version'
 require 'cloud_crowd/exceptions'
 require 'rest_client'
 require 'pathname'
@@ -45,9 +46,7 @@ module CloudCrowd
   autoload :Server, 'cloud_crowd/server'
   autoload :Worker, 'cloud_crowd/worker'
   autoload :WorkUnit, 'cloud_crowd/models'
-
-  # Keep this version in sync with the gemspec.
-  VERSION = '0.7.2'
+  autoload :Dispatcher, 'cloud_crowd/dispatcher'
 
   # Increment the schema version when there's a backwards incompatible change.
   SCHEMA_VERSION = 4
@@ -113,7 +112,11 @@ module CloudCrowd
     configuration = YAML.load(ERB.new(File.read(config_path)).result)
     ActiveRecord::Base.establish_connection(configuration)
     if validate_schema
-
+      begin
+        version = ActiveRecord::Base.connection.select_values('select max(version) from schema_migrations').first.to_i
+      rescue
+        version = 0
+      end
       return true if version == SCHEMA_VERSION
       puts "Your database schema is out of date. Please use `crowd load_schema` to update it. This will wipe all the tables, so make sure that your jobs have a chance to finish first.\nexiting..."
       exit
@@ -208,6 +211,12 @@ module CloudCrowd
      @identity == :node
    end
 
+    # Output a message with the current Timestamp prepended.
+    # Sinatra will re-direct stdout to a log file located at "log_path"
+    def log(message)
+      printf("%-20s %s\n", Time.now.strftime("%F-%T:"), message)
+    end
+
  end
 
 end
data/lib/cloud_crowd/command_line.rb
CHANGED
@@ -83,7 +83,7 @@ Options:
     pid_path = CloudCrowd.pid_path('server.pid')
     rackup_path = File.expand_path("#{@options[:config_path]}/config.ru")
     FileUtils.mkdir_p(CloudCrowd.log_path) if @options[:daemonize] && !File.exists?(CloudCrowd.log_path)
-    puts "Starting CloudCrowd Central Server on port #{port}..."
+    puts "Starting CloudCrowd Central Server (#{VERSION}) on port #{port}..."
     exec "thin -e #{@options[:environment]} -p #{port} #{daemonize} --tag cloud-crowd-server --log #{log_path} --pid #{pid_path} -R #{rackup_path} start"
   end
 
@@ -114,7 +114,7 @@ Options:
   # will be long-lived, although its workers will come and go.
   def start_node
     @options[:port] ||= Node::DEFAULT_PORT
-    puts "Starting CloudCrowd Node on port #{@options[:port]}..."
+    puts "Starting CloudCrowd Node (#{VERSION}) on port #{@options[:port]}..."
     Node.new(@options)
   end
 
@@ -242,4 +242,4 @@ Options:
   end
 
 end
-end
+end
data/lib/cloud_crowd/dispatcher.rb
ADDED
@@ -0,0 +1,52 @@
+module CloudCrowd
+
+  # The dispatcher is responsible for distributing work_units
+  # to the worker nodes.
+  #
+  # It automatically performs the distribution on a set schedule,
+  # but can also be signaled to perform distribution immediately
+  class Dispatcher
+
+    # Starts distributing jobs every "distribution_interval" seconds
+    def initialize(distribution_interval)
+      @mutex  = Mutex.new
+      @awaken = ConditionVariable.new
+      distribute_periodically(distribution_interval)
+    end
+
+    # Sends a signal to the distribution thread.
+    # If it's asleep, it will wake up and perform a distribution.
+    def distribute!
+      @mutex.synchronize do
+        @awaken.signal
+      end
+    end
+
+    private
+
+    # Perform distribution of work units in a background thread
+    def distribute_periodically(interval)
+      Thread.new{
+        loop do
+          perform_distribution
+          # Sleep for "interval" seconds.
+          # If awaken isn't signaled, timeout and attempt distribution
+          @mutex.synchronize do
+            @awaken.wait(@mutex, interval)
+          end
+        end
+      }
+    end
+
+    def perform_distribution
+      #CloudCrowd.log "Distributing jobs to nodes"
+      begin
+        WorkUnit.distribute_to_nodes
+      rescue StandardError => e
+        CloudCrowd.log "Exception: #{e}"
+        CloudCrowd.log e.backtrace
+      end
+    end
+
+  end
+end
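
The new Dispatcher boils down to the standard Mutex/ConditionVariable "wake early or time out" idiom. A minimal standalone sketch of that idiom (plain Ruby, not CloudCrowd code; the class and method names here are made up for illustration):

require 'thread'

class PeriodicTask
  def initialize(interval, &block)
    @mutex  = Mutex.new
    @awaken = ConditionVariable.new
    Thread.new do
      loop do
        block.call
        # wait releases the mutex while sleeping and returns on signal or timeout
        @mutex.synchronize { @awaken.wait(@mutex, interval) }
      end
    end
  end

  # Wake the loop immediately instead of waiting out the full interval.
  def poke!
    @mutex.synchronize { @awaken.signal }
  end
end

ticker = PeriodicTask.new(20) { puts "tick #{Time.now}" }
ticker.poke!   # runs the block again right away
sleep 2        # give the background thread a moment to run (demo only)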
data/lib/cloud_crowd/models/job.rb
CHANGED
@@ -53,7 +53,7 @@ module CloudCrowd
     return queue_for_workers([outs]) if merging?
     if complete?
       update_attributes(:outputs => outs, :time => time_taken)
-
+      CloudCrowd.log "Job ##{id} (#{action}) #{display_status}." unless ENV['RACK_ENV'] == 'test'
       CloudCrowd.defer { fire_callback } if callback_url
     end
     self
@@ -82,7 +82,7 @@ module CloudCrowd
       response = RestClient.post(callback_url, {:job => self.to_json})
       CloudCrowd.defer { self.destroy } if response && response.code == 201
     rescue RestClient::Exception => e
-
+      CloudCrowd.log "Job ##{id} (#{action}) failed to fire callback: #{callback_url}"
     end
   end
 
data/lib/cloud_crowd/models/node_record.rb
CHANGED
@@ -32,7 +32,13 @@ module CloudCrowd
        :max_workers     => params[:max_workers],
        :enabled_actions => params[:enabled_actions]
      }
-
+      host_attr = {:host => params[:host]}
+      if (record = where(host_attr).first)
+        record.update_attributes!(attrs)
+        record
+      else
+        create!(attrs.merge(host_attr))
+      end
    end
 
    # Dispatch a WorkUnit to this node. Places the node at back at the end of
@@ -46,14 +52,14 @@ module CloudCrowd
      touch && true
    rescue RestClient::RequestTimeout
      # The node's gone away. Destroy it and it will check in when it comes back
-
+      CloudCrowd.log "Node #{host} received RequestTimeout, removing it"
      destroy && false
    rescue RestClient::RequestFailed => e
      raise e unless e.http_code == 503 && e.http_body == Node::OVERLOADED_MESSAGE
      update_attribute(:busy, true) && false
    rescue RestClient::Exception, Errno::ECONNREFUSED, Timeout::Error, Errno::ECONNRESET=>e
      # Couldn't post to node, assume it's gone away.
-
+      CloudCrowd.log "Node #{host} received #{e.class} #{e}, removing it"
      destroy && false
    end
 
@@ -121,7 +127,6 @@ module CloudCrowd
    # Redistribute in a separate thread to avoid delaying shutdown.
    def redistribute_work_units
      release_work_units
-      CloudCrowd.defer { WorkUnit.distribute_to_nodes }
    end
 
  end
data/lib/cloud_crowd/models/work_unit.rb
CHANGED
@@ -79,7 +79,7 @@ module CloudCrowd
      reservation = SecureRandom.random_number(MAX_RESERVATION)
      conditions = "reservation is null and node_record_id is null and status in (#{INCOMPLETE.join(',')}) and #{options[:conditions]}"
      query = WorkUnit.where(conditions)
-      query.limit(options[:limit]) if options[:limit]
+      query = query.limit(options[:limit]) if options[:limit]
      any = query.update_all("reservation = #{reservation}") > 0
      any && reservation
    end
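
The one-word change above fixes a common ActiveRecord pitfall: relation methods such as limit return a new relation instead of mutating the receiver, so the result must be reassigned. A generic sketch (the where clause here is illustrative, not CloudCrowd's actual query):

query = WorkUnit.where(:status => 1)
query.limit(5)          # builds a limited relation and discards it; query is unchanged
query = query.limit(5)  # reassigning makes the LIMIT part of the executed query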
data/lib/cloud_crowd/node.rb
CHANGED
@@ -59,7 +59,8 @@
      throw :halt, [503, OVERLOADED_MESSAGE] if @overloaded
      unit = JSON.parse(params[:work_unit])
      pid = fork { Worker.new(self, unit).run }
-      Process.detach(pid)
+      thread = Process.detach(pid)
+      track_work(unit["id"], thread)
      json :pid => pid
    end
 
@@ -78,6 +79,7 @@
      @overloaded       = false
      @max_load         = CloudCrowd.config[:max_load]
      @min_memory       = CloudCrowd.config[:min_free_memory]
+      @work             = {}
      start unless ENV['RACK_ENV'] == 'test'
    end
 
@@ -92,7 +94,7 @@
      @server.daemonize if @daemon
      trap_signals
      asset_store
-      @server_thread =
+      @server_thread = Thread.new { @server.start }
      check_in(true)
      check_in_periodically
      monitor_system if @max_load || @min_memory
@@ -110,7 +112,7 @@
        :enabled_actions => @enabled_actions.join(',')
      )
    rescue RestClient::Exception, Errno::ECONNREFUSED
-
+      CloudCrowd.log "Failed to connect to the central server (#{@central.to_s})."
      raise SystemExit if critical
    end
 
@@ -151,15 +153,44 @@
        raise NotImplementedError, "'min_free_memory' is not yet implemented on your platform"
      end
    end
+
+    def track_work(id, thread)
+      @work[id] = { thread: thread, start: Time.now }
+    end
 
-
+    def check_on_workers
+      # ToDo, this isn't really thread safe.
+      # there are events in which a job completes and exits successfully
+      # while iteration here is taking place. However the interleaving
+      # is such that the work unit should be complete / cleaned up already
+      # even in the event that a thread is flagged as dead here.
+      @work.each do |unit_id, work|
+        unless work[:thread].alive?
+          CloudCrowd.log "Notifying central server that worker #{work[:thread].pid} for unit #{unit_id} mysteriously died."
+          data = {
+            id: unit_id,
+            pid: work[:thread].pid,
+            status: 'failed',
+            output: { output: "Worker thread #{work[:thread].pid} died on #{host} prior to #{Time.now}" }.to_json,
+            time: Time.now - work[:start] # this is time until failure was noticed
+          }
+          @central["/work/#{unit_id}"].put(data)
+          resolve_work(unit_id)
+        end
+      end
+    end
+
+    def resolve_work(unit_id)
+      @work.delete(unit_id)
+    end
+
    private
 
    # Launch a monitoring thread that periodically checks the node's load
    # average and the amount of free memory remaining. If we transition out of
    # the overloaded state, let central know.
    def monitor_system
-      @monitor_thread =
+      @monitor_thread = Thread.new do
        loop do
          was_overloaded = @overloaded
          @overloaded = overloaded?
@@ -173,8 +204,9 @@
    # will assume that the node has gone down. Checking in will let central know
    # it's still online.
    def check_in_periodically
-      @check_in_thread =
+      @check_in_thread = Thread.new do
        loop do
+          check_on_workers
          reply = ""
          1.upto(5).each do | attempt_number |
            # sleep for an ever increasing amount of time to prevent overloading the server
@@ -183,13 +215,13 @@
          # if we did not receive a reply, the server has went away; it
          # will reply with an empty string if the check-in succeeds
          if reply.nil?
-
+            CloudCrowd.log "Failed on attempt ##{attempt_number} to check in with server"
          else
            break
          end
        end
        if reply.nil?
-
+          CloudCrowd.log "Giving up after repeated attempts to contact server"
          raise SystemExit
        end
      end
@@ -200,7 +232,6 @@
    def trap_signals
      Signal.trap('QUIT') { shut_down }
      Signal.trap('INT')  { shut_down }
-      Signal.trap('KILL') { shut_down }
      Signal.trap('TERM') { shut_down }
    end
 
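
The worker-tracking change above relies on the thread returned by Process.detach. A minimal standalone sketch of the same pattern (plain Ruby on a Unix-like system, not CloudCrowd code; the unit id 42 is made up):

pid    = fork { sleep 2 }        # stand-in for a forked worker process
thread = Process.detach(pid)     # Process::Waiter thread; thread.pid == pid

work = { 42 => { thread: thread, start: Time.now } }

sleep 3
work.each do |unit_id, w|
  unless w[:thread].alive?       # the child has exited, cleanly or not
    elapsed = Time.now - w[:start]
    puts "unit #{unit_id}: worker #{w[:thread].pid} exited after #{elapsed.round}s"
  end
end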
data/lib/cloud_crowd/server.rb
CHANGED
@@ -20,6 +20,10 @@
  class Server < Sinatra::Base
    use ActiveRecord::ConnectionAdapters::ConnectionManagement
 
+    # The interval (in seconds) at which the server will distribute
+    # new work units to the nodes
+    DISTRIBUTE_INTERVAL=20
+
    set :root, ROOT
    set :authorization_realm, "CloudCrowd"
 
@@ -54,11 +58,6 @@
      `tail -n 100 #{CloudCrowd.log_path('server.log')}`
    end
 
-    # Get the JSON for what a worker is up to.
-    get '/worker/:name' do
-      json WorkUnit.find_by_worker_name(params[:name]) || {}
-    end
-
    # To monitor the central server with Monit, God, Nagios, or another
    # monitoring tool, you can hit /heartbeat to make sure.
    get '/heartbeat' do
@@ -71,8 +70,8 @@
    # Distributes all work units to available nodes.
    post '/jobs' do
      job = Job.create_from_request(JSON.parse(params[:job]))
-      CloudCrowd.
-
+      CloudCrowd.log("Job ##{job.id} (#{job.action}) started.") unless ENV['RACK_ENV'] == 'test'
+      @dispatcher.distribute!
      json job
    end
 
@@ -95,11 +94,8 @@
    # configuration with the central server. Triggers distribution of WorkUnits.
    put '/node/:host' do
      NodeRecord.check_in(params, request)
-
-
-      sleep 15 # Give the new node awhile to start listening
-      WorkUnit.distribute_to_nodes
-      end
+      CloudCrowd.log "Node #{params[:host]} checked in."
+      @dispatcher.distribute!
      json nil
    end
 
@@ -107,7 +103,8 @@
    # WorkUnits it may have had checked out.
    delete '/node/:host' do
      NodeRecord.destroy_all(:host => params[:host])
-
+      CloudCrowd.log "Node #{params[:host]} checked out."
+      @dispatcher.distribute!
      json nil
    end
 
@@ -115,20 +112,22 @@
    # they mark it back on the central server and exit. Triggers distribution
    # of pending work units.
    put '/work/:work_unit_id' do
+      CloudCrowd.log "Job #{current_work_unit.job_id} WorkUnit #{current_work_unit.id} #{current_work_unit.action} #{params[:status]} in #{params[:time]}"
      case params[:status]
      when 'succeeded' then current_work_unit.finish(params[:output], params[:time])
      when 'failed'    then current_work_unit.fail(params[:output], params[:time])
      else             error(500, "Completing a work unit must specify status.")
      end
-
-
+      @dispatcher.distribute!
      json nil
    end
 
    # At initialization record the identity of this Ruby instance as a server.
    def initialize(*args)
+      CloudCrowd.log "Starting server"
      super(*args)
      CloudCrowd.identity = :server
+      @dispatcher = Dispatcher.new(DISTRIBUTE_INTERVAL)
    end
 
  end
data/lib/cloud_crowd/worker.rb
CHANGED
@@ -89,6 +89,7 @@
      action.cleanup_work_directory if action
      fail_work_unit(e)
    end
+    @node.resolve_work(@unit['id'])
  end
 
  # Run this worker inside of a fork. Attempts to exit cleanly.
@@ -141,7 +142,6 @@
    def trap_signals
      Signal.trap('QUIT') { Process.exit! }
      Signal.trap('INT')  { Process.exit! }
-      Signal.trap('KILL') { Process.exit! }
      Signal.trap('TERM') { Process.exit! }
    end
 
data/test/acceptance/test_failing_work_units.rb
CHANGED
@@ -4,9 +4,10 @@ require 'test_helper'
 class FailingWorkUnitsTest < Minitest::Test
 
   should "retry work units when they fail" do
-    WorkUnit.
+    WorkUnit.stubs(:distribute_to_nodes).returns([])
+    Dispatcher.any_instance.stubs(:distribute_periodically)
+    Dispatcher.any_instance.expects(:distribute!)
     browser = Rack::Test::Session.new(Rack::MockSession.new(CloudCrowd::Server))
-
     browser.post '/jobs', :job => {
       'action' => 'failure_testing',
       'inputs' => ['one', 'two', 'three'],
data/test/acceptance/test_server.rb
CHANGED
@@ -11,10 +11,13 @@ class ServerTest < Minitest::Test
   context "The CloudCrowd::Server (Sinatra)" do
 
     setup do
+      WorkUnit.stubs(:distribute_to_nodes).returns([])
+      Dispatcher.any_instance.stubs(:distribute_periodically)
       Job.destroy_all
       2.times { Job.make! }
     end
 
+
     should "set the identity of the Ruby instance" do
       app.new
       assert CloudCrowd.server?
@@ -37,7 +40,7 @@ class ServerTest < Minitest::Test
     end
 
     should "be able to create a job" do
-
+      Dispatcher.any_instance.expects(:distribute!)
       post('/jobs', :job => '{"action":"graphics_magick","inputs":["http://www.google.com/"]}')
       assert last_response.ok?
       job_info = JSON.parse(last_response.body)
data/test/acceptance/test_word_count.rb
CHANGED
@@ -5,7 +5,8 @@ class WordCountTest < Minitest::Test
   context "the word_count action" do
 
     setup do
-
+      Dispatcher.any_instance.expects(:distribute!)
+      Dispatcher.any_instance.stubs(:distribute_periodically)
       @asset_store = AssetStore.new
       @browser = Rack::Test::Session.new(Rack::MockSession.new(CloudCrowd::Server))
       @browser.put('/worker', :name => 'test_worker', :thread_status => 'sleeping')
@@ -16,14 +17,13 @@ class WordCountTest < Minitest::Test
     should "be able to create a word_count job" do
       assert @browser.last_response.ok?
       info = JSON.parse(@browser.last_response.body)
-
-
+      assert_equal 'processing', info['status']
+      assert_equal 1, info['work_units']
     end
 
     should "be able to perform the processing stage of a word_count" do
       action = CloudCrowd.actions['word_count'].new(1, "file://#{File.expand_path(__FILE__)}", {}, @asset_store)
-
-      assert count == 101
+      assert_equal 96, action.process
     end
 
   end
data/test/test_helper.rb
CHANGED
@@ -19,10 +19,25 @@ CloudCrowd.configure_database(here + '/config/database.yml')
 
 require "#{CloudCrowd::ROOT}/test/blueprints.rb"
 
+
+module TestHelpers
+  def setup
+    CloudCrowd::WorkUnit.stubs(:distribute_to_nodes).returns([])
+    CloudCrowd.stubs(:log)
+    super
+  end
+  def teardown
+    Mocha::Mockery.instance.teardown
+    Mocha::Mockery.reset_instance
+    super
+  end
+end
+
 class Minitest::Test
-  include
+  include TestHelpers
   include Shoulda::Matchers::ActiveRecord
   extend Shoulda::Matchers::ActiveRecord
   include Shoulda::Matchers::ActiveModel
   extend Shoulda::Matchers::ActiveModel
+  include CloudCrowd
 end
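
Since TestHelpers is mixed into Minitest::Test itself, every test case in the suite picks up the stubbed distribution and logging automatically; the added include CloudCrowd is also what lets the test files above refer to WorkUnit and Dispatcher without the CloudCrowd:: prefix. A hypothetical test file (not from the gem) showing the effect of the shared setup:

require 'test_helper'

class ExampleTest < Minitest::Test
  # TestHelpers#setup runs before each test via the include above, so
  # WorkUnit.distribute_to_nodes is already stubbed to return [] and
  # CloudCrowd.log is silenced; no real nodes or log output are touched.
  def test_distribution_is_stubbed
    assert_equal [], CloudCrowd::WorkUnit.distribute_to_nodes
  end
end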
data/test/unit/test_job.rb
CHANGED
@@ -77,6 +77,7 @@ class JobTest < Minitest::Test
     Job.any_instance.stubs(:fire_callback).returns(true)
     Job.any_instance.expects(:fire_callback)
     @job.work_units.first.finish('{"output":"output"}', 10)
+    sleep 0.5 # block to allow Crowd.defer thread to execute
     assert @job.all_work_units_complete?
   end
 
data/test/unit/test_node_record.rb
CHANGED
@@ -39,6 +39,24 @@ class NodeRecordTest < Minitest::Test
       assert !!URI.parse(@node.url)
     end
 
+    should "be able to check-in and be updated" do
+      request = Rack::Request.new({'REMOTE_ADDR'=>'127.0.0.1'})
+      node_data = {
+        :ip_address      => '127.0.0.1',
+        :host            => "hostname-42:6032",
+        :busy            => false,
+        :max_workers     => 3,
+        :enabled_actions => 'graphics_magick,word_count'
+      }
+      node_data[:host] << ':6093'
+      record = NodeRecord.check_in( node_data, request )
+      assert_equal '127.0.0.1', record.ip_address
+      assert_equal 3, record.max_workers
+      node_data[:max_workers] = 2
+      updated_record = NodeRecord.check_in( node_data, request )
+      assert_equal updated_record, record
+      assert_equal 2, updated_record.max_workers
+    end
   end
 
 end
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: cloud-crowd
 version: !ruby/object:Gem::Version
-  version: 0.7.3
+  version: 0.7.5
 platform: ruby
 authors:
 - Jeremy Ashkenas
@@ -10,7 +10,7 @@ authors:
 autorequire:
 bindir: bin
 cert_chain: []
-date:
+date: 2015-06-17 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: activerecord
@@ -214,6 +214,7 @@ files:
 - lib/cloud_crowd/asset_store/filesystem_store.rb
 - lib/cloud_crowd/asset_store/s3_store.rb
 - lib/cloud_crowd/command_line.rb
+- lib/cloud_crowd/dispatcher.rb
 - lib/cloud_crowd/exceptions.rb
 - lib/cloud_crowd/helpers.rb
 - lib/cloud_crowd/helpers/authorization.rb
@@ -226,6 +227,7 @@ files:
 - lib/cloud_crowd/node.rb
 - lib/cloud_crowd/schema.rb
 - lib/cloud_crowd/server.rb
+- lib/cloud_crowd/version.rb
 - lib/cloud_crowd/worker.rb
 - public/css/admin_console.css
 - public/css/reset.css