openshift-origin-msg-broker-mcollective 1.2.6
- data/COPYRIGHT +1 -0
- data/Gemfile +3 -0
- data/LICENSE +11 -0
- data/README.md +3 -0
- data/Rakefile +9 -0
- data/conf/openshift-origin-msg-broker-mcollective.conf.example +25 -0
- data/config/initializers/openshift-origin-msg-broker-mcollective.rb +34 -0
- data/lib/mcollective_msg_brojer_engine.rb +7 -0
- data/lib/openshift-origin-msg-broker-mcollective.rb +10 -0
- data/lib/openshift/mcollective_application_container_proxy.rb +1606 -0
- data/openshift-origin-msg-broker-mcollective.gemspec +37 -0
- data/rubygem-openshift-origin-msg-broker-mcollective.spec +147 -0
- metadata +201 -0
data/COPYRIGHT
ADDED
@@ -0,0 +1 @@
Copyright 2012 Red Hat, Inc. and/or its affiliates.
data/Gemfile
ADDED
data/LICENSE
ADDED
@@ -0,0 +1,11 @@
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
data/README.md
ADDED
@@ -0,0 +1,3 @@
Notice of Export Control Law

This software distribution includes cryptographic software that is subject to the U.S. Export Administration Regulations (the "*EAR*") and other U.S. and foreign laws and may not be exported, re-exported or transferred (a) to any country listed in Country Group E:1 in Supplement No. 1 to part 740 of the EAR (currently, Cuba, Iran, North Korea, Sudan & Syria); (b) to any prohibited destination or to any end user who has been prohibited from participating in U.S. export transactions by any federal agency of the U.S. government; or (c) for use in connection with the design, development or production of nuclear, chemical or biological weapons, or rocket systems, space launch vehicles, or sounding rockets, or unmanned air vehicle systems. You may not download this software or technical information if you are located in one of these countries or otherwise subject to these restrictions. You may not provide this software or technical information to individuals or entities located in one of these countries or otherwise subject to these restrictions. You are also responsible for compliance with foreign law requirements applicable to the import, export and use of this software and technical information.
data/Rakefile
ADDED
data/conf/openshift-origin-msg-broker-mcollective.conf.example
ADDED
@@ -0,0 +1,25 @@
# Some settings to configure how mcollective handles gear placement on nodes:

# Use districts when placing gears and moving them between hosts. Should be
# true except for particular dev/test situations.
DISTRICTS_ENABLED=true

# Require new gears to be placed in a district; when true, placement will fail
# if there isn't a district with capacity and the right gear profile.
DISTRICTS_REQUIRE_FOR_APP_CREATE=false

# Used as the default max gear capacity when creating a district.
DISTRICTS_MAX_CAPACITY=6000

# It is unlikely these will need to be changed
DISTRICTS_FIRST_UID=1000
MCOLLECTIVE_DISCTIMEOUT=5
MCOLLECTIVE_TIMEOUT=180
MCOLLECTIVE_VERBOSE=false
MCOLLECTIVE_PROGRESS_BAR=0
MCOLLECTIVE_CONFIG="/etc/mcollective/client.cfg"

# Place gears on nodes with the requested profile; should be true, as
# a false value means gear profiles are ignored and gears are placed arbitrarily.
NODE_PROFILE_ENABLED=true
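Note that the example file's defaults are more permissive than the ones compiled into the initializer below (for instance, MCOLLECTIVE_TIMEOUT is 180 here but 60 in code, and districts are on here but off in code). A minimal sketch of reading these settings with the gem's own config class, assuming the file is installed under the broker's plugin configuration directory (the exact path comes from OpenShift::Config::PLUGINS_DIR):

require 'openshift-origin-common'

# Assumed install path; PLUGINS_DIR resolves it in the real initializer.
conf = OpenShift::Config.new('/etc/openshift/plugins.d/openshift-origin-msg-broker-mcollective.conf')
timeout   = conf.get('MCOLLECTIVE_TIMEOUT', '60').to_i   # => 180 once the example file is installed
districts = conf.get_bool('DISTRICTS_ENABLED', 'false')  # => true with the example file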
data/config/initializers/openshift-origin-msg-broker-mcollective.rb
ADDED
@@ -0,0 +1,34 @@
require 'openshift-origin-common'

Broker::Application.configure do
  unless config.respond_to? :msg_broker
    conf_file = File.join(OpenShift::Config::PLUGINS_DIR, File.basename(__FILE__, '.rb') + '.conf')
    if Rails.env.development?
      dev_conf_file = File.join(OpenShift::Config::PLUGINS_DIR, File.basename(__FILE__, '.rb') + '-dev.conf')
      if File.exist? dev_conf_file
        conf_file = dev_conf_file
      else
        Rails.logger.info "Development configuration for #{File.basename(__FILE__, '.rb')} not found. Using production configuration."
      end
    end
    conf = OpenShift::Config.new(conf_file)

    config.msg_broker = {
      :rpc_options => {
        :disctimeout => conf.get("MCOLLECTIVE_DISCTIMEOUT", "5").to_i,
        :timeout => conf.get("MCOLLECTIVE_TIMEOUT", "60").to_i,
        :verbose => conf.get_bool("MCOLLECTIVE_VERBOSE", "false"),
        :progress_bar => conf.get_bool("MCOLLECTIVE_PROGRESS_BAR", false),
        :filter => {"identity" => [], "fact" => [], "agent" => [], "cf_class" => [], "compound" => []},
        :config => conf.get("MCOLLECTIVE_CONFIG", "/etc/mcollective/client.cfg"),
      },
      :districts => {
        :enabled => conf.get_bool("DISTRICTS_ENABLED", "false"),
        :require_for_app_create => conf.get_bool("DISTRICTS_REQUIRE_FOR_APP_CREATE", "false"),
        :max_capacity => conf.get("DISTRICTS_MAX_CAPACITY", "6000").to_i,
        :first_uid => conf.get("DISTRICTS_FIRST_UID", "1000").to_i,
      },
      :node_profile_enabled => conf.get_bool("NODE_PROFILE_ENABLED", "false"),
    }
  end
end
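Once this initializer runs, the merged settings are available to the proxy code below through Rails.configuration.msg_broker. A hedged broker-console check, assuming the example conf above is installed:

Rails.configuration.msg_broker[:rpc_options][:timeout]  # => 180 with the example conf, 60 otherwise
Rails.configuration.msg_broker[:districts][:enabled]    # => true with the example conf, false otherwise
Rails.configuration.msg_broker[:node_profile_enabled]   # => true with the example conf, false otherwise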
data/lib/openshift-origin-msg-broker-mcollective.rb
ADDED
@@ -0,0 +1,10 @@
require "openshift-origin-common"

module OpenShift
  module McollectiveMsgBrokerModule
    require 'mcollective_msg_brojer_engine' if defined?(Rails) && Rails::VERSION::MAJOR == 3
  end
end

require "openshift/mcollective_application_container_proxy.rb"
OpenShift::ApplicationContainerProxy.provider=OpenShift::MCollectiveApplicationContainerProxy
data/lib/openshift/mcollective_application_container_proxy.rb
ADDED
@@ -0,0 +1,1606 @@
require 'mcollective'
require 'open-uri'

include MCollective::RPC
module OpenShift
  class MCollectiveApplicationContainerProxy < OpenShift::ApplicationContainerProxy
    @@C_CONTROLLER = 'openshift-origin-node'
    attr_accessor :id, :district

    def initialize(id, district=nil)
      @id = id
      @district = district
    end

    def self.valid_gear_sizes_impl(user)
      capability_gear_sizes = []
      capability_gear_sizes = user.capabilities['gear_sizes'] if user.capabilities.has_key?('gear_sizes')

      if user.auth_method == :broker_auth
        return ["small", "medium"] | capability_gear_sizes
      elsif !capability_gear_sizes.nil? and !capability_gear_sizes.empty?
        return capability_gear_sizes
      else
        return ["small"]
      end
    end

    def self.find_available_impl(node_profile=nil, district_uuid=nil)
      district = nil
      require_specific_district = !district_uuid.nil?
      if Rails.configuration.msg_broker[:districts][:enabled] && (!district_uuid || district_uuid == 'NONE')
        district = District.find_available(node_profile)
        if district
          district_uuid = district.uuid
          Rails.logger.debug "DEBUG: find_available_impl: district_uuid: #{district_uuid}"
        elsif Rails.configuration.msg_broker[:districts][:require_for_app_create]
          raise OpenShift::NodeException.new("No district nodes available.", 140)
        end
      end
      current_server, current_capacity, preferred_district = rpc_find_available(node_profile, district_uuid, require_specific_district)
      if !current_server
        current_server, current_capacity, preferred_district = rpc_find_available(node_profile, district_uuid, require_specific_district, true)
      end
      district = preferred_district if preferred_district
      Rails.logger.debug "CURRENT SERVER: #{current_server}"
      raise OpenShift::NodeException.new("No nodes available.", 140) unless current_server
      Rails.logger.debug "DEBUG: find_available_impl: current_server: #{current_server}: #{current_capacity}"

      MCollectiveApplicationContainerProxy.new(current_server, district)
    end

    def self.find_one_impl(node_profile=nil)
      current_server = rpc_find_one(node_profile)
      Rails.logger.debug "CURRENT SERVER: #{current_server}"
      raise OpenShift::NodeException.new("No nodes found.", 140) unless current_server
      Rails.logger.debug "DEBUG: find_one_impl: current_server: #{current_server}"

      MCollectiveApplicationContainerProxy.new(current_server)
    end
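    # A hedged usage sketch (not part of the original file): broker code asks
    # for the least-loaded node matching a gear profile and gets back a proxy;
    # a NodeException with code 140 is raised when nothing qualifies.
    #
    #   proxy = MCollectiveApplicationContainerProxy.find_available_impl('medium')
    #   proxy.id        # MCollective identity of the chosen node
    #   proxy.district  # District object, or nil when the node is undistricted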

    def self.get_blacklisted_in_impl
      []
    end

    def self.blacklisted_in_impl?(name)
      false
    end

    def get_available_cartridges
      args = Hash.new
      args['--porcelain'] = true
      args['--with-descriptors'] = true
      result = execute_direct(@@C_CONTROLLER, 'cartridge-list', args, false)
      result = parse_result(result)
      cart_data = JSON.parse(result.resultIO.string)
      cart_data.map! {|c| OpenShift::Cartridge.new.from_descriptor(YAML.load(c))}
    end

    # Returns an array with the following information:
    # [Filesystem, blocks_used, blocks_soft_limit, blocks_hard_limit, inodes_used,
    #  inodes_soft_limit, inodes_hard_limit]
    def get_quota(gear)
      args = Hash.new
      args['--uuid'] = gear.uuid
      reply = execute_direct(@@C_CONTROLLER, 'get-quota', args, false)

      output = nil
      exitcode = 0
      if reply.length > 0
        mcoll_result = reply[0]
        if (mcoll_result && (defined? mcoll_result.results) && !mcoll_result.results[:data].nil?)
          output = mcoll_result.results[:data][:output]
          exitcode = mcoll_result.results[:data][:exitcode]
          raise OpenShift::NodeException.new("Failed to get quota for user: #{output}", 143) unless exitcode == 0
        else
          raise OpenShift::NodeException.new("Node execution failure (error getting result from node). If the problem persists please contact Red Hat support.", 143)
        end
      else
        raise OpenShift::NodeException.new("Node execution failure (error getting result from node). If the problem persists please contact Red Hat support.", 143)
      end
      output
    end

    # Set blocks hard limit and inodes hard limit for uuid
    def set_quota(gear, storage_in_gb, inodes)
      args = Hash.new
      args['--uuid'] = gear.uuid
      # quota command acts on 1K blocks
      args['--blocks'] = Integer(storage_in_gb * 1024 * 1024)
      args['--inodes'] = inodes unless inodes.nil?
      reply = execute_direct(@@C_CONTROLLER, 'set-quota', args, false)

      output = nil
      exitcode = 0
      if reply.length > 0
        mcoll_result = reply[0]
        if (mcoll_result && (defined? mcoll_result.results) && !mcoll_result.results[:data].nil?)
          output = mcoll_result.results[:data][:output]
          exitcode = mcoll_result.results[:data][:exitcode]
          raise OpenShift::NodeException.new("Failed to set quota for user: #{output}", 143) unless exitcode == 0
        else
          raise OpenShift::NodeException.new("Node execution failure (error getting result from node). If the problem persists please contact Red Hat support.", 143)
        end
      else
        raise OpenShift::NodeException.new("Node execution failure (error getting result from node). If the problem persists please contact Red Hat support.", 143)
      end
    end
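    # The '--blocks' conversion above works in the quota command's 1K blocks;
    # a plain-Ruby check of that arithmetic (illustration only):
    #
    #   storage_in_gb = 2
    #   Integer(storage_in_gb * 1024 * 1024)  # => 2097152 one-kilobyte blocks == 2 GB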

    def reserve_uid(district_uuid=nil)
      reserved_uid = nil
      if Rails.configuration.msg_broker[:districts][:enabled]
        if @district
          district_uuid = @district.uuid
        else
          district_uuid = get_district_uuid unless district_uuid
        end
        if district_uuid && district_uuid != 'NONE'
          reserved_uid = OpenShift::DataStore.instance.reserve_district_uid(district_uuid)
          raise OpenShift::OOException.new("uid could not be reserved") unless reserved_uid
        end
      end
      reserved_uid
    end

    def unreserve_uid(uid, district_uuid=nil)
      if Rails.configuration.msg_broker[:districts][:enabled]
        if @district
          district_uuid = @district.uuid
        else
          district_uuid = get_district_uuid unless district_uuid
        end
        if district_uuid && district_uuid != 'NONE'
          OpenShift::DataStore.instance.unreserve_district_uid(district_uuid, uid)
        end
      end
    end

    def inc_externally_reserved_uids_size(district_uuid=nil)
      if Rails.configuration.msg_broker[:districts][:enabled]
        if @district
          district_uuid = @district.uuid
        else
          district_uuid = get_district_uuid unless district_uuid
        end
        if district_uuid && district_uuid != 'NONE'
          OpenShift::DataStore.instance.inc_district_externally_reserved_uids_size(district_uuid)
        end
      end
    end
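    # Sketch of the UID lifecycle these three helpers implement, assuming a
    # districted deployment: a gear's UID is checked out of the district's
    # pool before app-create and returned on teardown (illustration only).
    #
    #   uid = proxy.reserve_uid       # pulls a free UID from the district pool
    #   begin
    #     # ... create the gear with this uid ...
    #   ensure
    #     proxy.unreserve_uid(uid)    # only on failure or teardown
    #   end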

    def create(app, gear, quota_blocks=nil, quota_files=nil)
      result = nil
      (1..10).each do |i|
        args = Hash.new
        args['--with-app-uuid'] = app.uuid
        args['--with-app-name'] = app.name
        args['--with-container-uuid'] = gear.uuid
        args['--with-container-name'] = gear.name
        args['--with-quota-blocks'] = quota_blocks if quota_blocks
        args['--with-quota-files'] = quota_files if quota_files
        args['--with-namespace'] = app.domain.namespace
        args['--with-uid'] = gear.uid if gear.uid
        mcoll_reply = execute_direct(@@C_CONTROLLER, 'app-create', args)
        result = parse_result(mcoll_reply)
        if result.exitcode == 129 && has_uid_or_gid?(app.gear.uid) # Code to indicate uid already taken
          destroy(app, gear, true)
          inc_externally_reserved_uids_size
          gear.uid = reserve_uid
          app.save
        else
          break
        end
      end
      result
    end

    def destroy(app, gear, keep_uid=false, uid=nil, skip_hooks=false)
      args = Hash.new
      args['--with-app-uuid'] = app.uuid
      args['--with-app-name'] = app.name
      args['--with-container-uuid'] = gear.uuid
      args['--with-container-name'] = gear.name
      args['--with-namespace'] = app.domain.namespace
      args['--skip-hooks'] = true if skip_hooks
      result = execute_direct(@@C_CONTROLLER, 'app-destroy', args)
      result_io = parse_result(result)

      uid = gear.uid unless uid

      if uid && !keep_uid
        unreserve_uid(uid)
      end
      return result_io
    end

    def add_authorized_ssh_key(app, gear, ssh_key, key_type=nil, comment=nil)
      args = Hash.new
      args['--with-app-uuid'] = app.uuid
      args['--with-container-uuid'] = gear.uuid
      args['--with-ssh-key'] = ssh_key
      args['--with-ssh-key-type'] = key_type if key_type
      args['--with-ssh-key-comment'] = comment if comment
      result = execute_direct(@@C_CONTROLLER, 'authorized-ssh-key-add', args)
      parse_result(result)
    end

    def remove_authorized_ssh_key(app, gear, ssh_key, comment=nil)
      args = Hash.new
      args['--with-app-uuid'] = app.uuid
      args['--with-container-uuid'] = gear.uuid
      args['--with-ssh-key'] = ssh_key
      args['--with-ssh-comment'] = comment if comment
      result = execute_direct(@@C_CONTROLLER, 'authorized-ssh-key-remove', args)
      parse_result(result)
    end

    def add_env_var(app, gear, key, value)
      args = Hash.new
      args['--with-app-uuid'] = app.uuid
      args['--with-container-uuid'] = gear.uuid
      args['--with-key'] = key
      args['--with-value'] = value
      result = execute_direct(@@C_CONTROLLER, 'env-var-add', args)
      parse_result(result)
    end

    def remove_env_var(app, gear, key)
      args = Hash.new
      args['--with-app-uuid'] = app.uuid
      args['--with-container-uuid'] = gear.uuid
      args['--with-key'] = key
      result = execute_direct(@@C_CONTROLLER, 'env-var-remove', args)
      parse_result(result)
    end

    def add_broker_auth_key(app, gear, iv, token)
      args = Hash.new
      args['--with-app-uuid'] = app.uuid
      args['--with-container-uuid'] = gear.uuid
      args['--with-iv'] = iv
      args['--with-token'] = token
      result = execute_direct(@@C_CONTROLLER, 'broker-auth-key-add', args)
      parse_result(result)
    end

    def remove_broker_auth_key(app, gear)
      args = Hash.new
      args['--with-app-uuid'] = app.uuid
      args['--with-container-uuid'] = gear.uuid
      result = execute_direct(@@C_CONTROLLER, 'broker-auth-key-remove', args)
      parse_result(result)
    end

    def show_state(app, gear)
      args = Hash.new
      args['--with-app-uuid'] = app.uuid
      args['--with-container-uuid'] = gear.uuid
      result = execute_direct(@@C_CONTROLLER, 'app-state-show', args)
      parse_result(result)
    end

    def configure_cartridge(app, gear, cart, template_git_url=nil)
      result_io = ResultIO.new
      cart_data = nil

      if framework_carts.include? cart
        result_io = run_cartridge_command(cart, app, gear, "configure", template_git_url)
      elsif embedded_carts.include? cart
        result_io, cart_data = add_component(app, gear, cart)
      else
        # no-op
      end

      return result_io, cart_data
    end

    def deconfigure_cartridge(app, gear, cart)
      if framework_carts.include? cart
        run_cartridge_command(cart, app, gear, "deconfigure")
      elsif embedded_carts.include? cart
        remove_component(app, gear, cart)
      else
        ResultIO.new
      end
    end

    def get_public_hostname
      rpc_get_fact_direct('public_hostname')
    end

    def get_capacity
      rpc_get_fact_direct('capacity').to_f
    end

    def get_active_capacity
      rpc_get_fact_direct('active_capacity').to_f
    end

    def get_district_uuid
      rpc_get_fact_direct('district_uuid')
    end

    def get_ip_address
      rpc_get_fact_direct('ipaddress')
    end

    def get_public_ip_address
      rpc_get_fact_direct('public_ip')
    end

    def get_node_profile
      rpc_get_fact_direct('node_profile')
    end

    def get_quota_blocks
      rpc_get_fact_direct('quota_blocks')
    end

    def get_quota_files
      rpc_get_fact_direct('quota_files')
    end

    def execute_connector(app, gear, cart, connector_name, input_args)
      args = Hash.new
      args['--gear-uuid'] = gear.uuid
      args['--cart-name'] = cart
      args['--hook-name'] = connector_name
      args['--input-args'] = input_args.join(" ")
      mcoll_reply = execute_direct(@@C_CONTROLLER, 'connector-execute', args)
      if mcoll_reply and mcoll_reply.length > 0
        mcoll_reply = mcoll_reply[0]
        output = mcoll_reply.results[:data][:output]
        exitcode = mcoll_reply.results[:data][:exitcode]
        return [output, exitcode]
      end
      [nil, nil]
    end

    def start(app, gear, cart)
      if framework_carts.include?(cart)
        run_cartridge_command(cart, app, gear, "start")
      elsif embedded_carts.include? cart
        start_component(app, gear, cart)
      else
        ResultIO.new
      end
    end

    def stop(app, gear, cart)
      if framework_carts.include?(cart)
        run_cartridge_command(cart, app, gear, "stop")
      elsif embedded_carts.include? cart
        stop_component(app, gear, cart)
      else
        ResultIO.new
      end
    end

    def force_stop(app, gear, cart)
      args = Hash.new
      args['--with-app-uuid'] = app.uuid
      args['--with-container-uuid'] = gear.uuid
      result = execute_direct(@@C_CONTROLLER, 'force-stop', args)
      parse_result(result)
    end

    def restart(app, gear, cart)
      if framework_carts.include?(cart)
        run_cartridge_command(cart, app, gear, "restart")
      elsif embedded_carts.include? cart
        restart_component(app, gear, cart)
      else
        ResultIO.new
      end
    end

    def reload(app, gear, cart)
      if framework_carts.include?(cart)
        run_cartridge_command(cart, app, gear, "reload")
      elsif embedded_carts.include? cart
        reload_component(app, gear, cart)
      else
        ResultIO.new
      end
    end

    def status(app, gear, cart)
      if framework_carts.include?(cart)
        run_cartridge_command(cart, app, gear, "status")
      elsif embedded_carts.include? cart
        component_status(app, gear, cart)
      else
        ResultIO.new
      end
    end

    def tidy(app, gear, cart)
      args = Hash.new
      args['--with-app-uuid'] = app.uuid
      args['--with-container-uuid'] = gear.uuid
      result = execute_direct(@@C_CONTROLLER, 'tidy', args)
      parse_result(result)
    end

    def threaddump(app, gear, cart)
      if framework_carts.include?(cart)
        run_cartridge_command(cart, app, gear, "threaddump")
      else
        ResultIO.new
      end
    end

    def system_messages(app, gear, cart)
      if framework_carts.include?(cart)
        run_cartridge_command(cart, app, gear, "system-messages")
      else
        ResultIO.new
      end
    end

    def expose_port(app, gear, cart)
      run_cartridge_command(cart, app, gear, "expose-port")
    end

    def conceal_port(app, gear, cart)
      run_cartridge_command(cart, app, gear, "conceal-port")
    end

    def show_port(app, gear, cart)
      run_cartridge_command(cart, app, gear, "show-port")
    end

    def add_alias(app, gear, server_alias)
      args = Hash.new
      args['--with-container-uuid'] = gear.uuid
      args['--with-container-name'] = gear.name
      args['--with-namespace'] = app.domain.namespace
      args['--with-alias-name'] = server_alias
      result = execute_direct(@@C_CONTROLLER, 'add-alias', args)
      parse_result(result)
    end

    def remove_alias(app, gear, server_alias)
      args = Hash.new
      args['--with-container-uuid'] = gear.uuid
      args['--with-container-name'] = gear.name
      args['--with-namespace'] = app.domain.namespace
      args['--with-alias-name'] = server_alias
      result = execute_direct(@@C_CONTROLLER, 'remove-alias', args)
      parse_result(result)
    end

    def update_namespace(app, gear, cart, new_ns, old_ns)
      mcoll_reply = execute_direct(cart, 'update-namespace', "#{gear.name} #{new_ns} #{old_ns} #{gear.uuid}")
      parse_result(mcoll_reply)
    end

    def get_env_var_add_job(app, gear, key, value)
      args = Hash.new
      args['--with-app-uuid'] = app.uuid
      args['--with-container-uuid'] = gear.uuid
      args['--with-key'] = key
      args['--with-value'] = value
      job = RemoteJob.new('openshift-origin-node', 'env-var-add', args)
      job
    end

    def get_env_var_remove_job(app, gear, key)
      args = Hash.new
      args['--with-app-uuid'] = app.uuid
      args['--with-container-uuid'] = gear.uuid
      args['--with-key'] = key
      job = RemoteJob.new('openshift-origin-node', 'env-var-remove', args)
      job
    end

    def get_add_authorized_ssh_key_job(app, gear, ssh_key, key_type=nil, comment=nil)
      args = Hash.new
      args['--with-app-uuid'] = app.uuid
      args['--with-container-uuid'] = gear.uuid
      args['--with-ssh-key'] = ssh_key
      args['--with-ssh-key-type'] = key_type if key_type
      args['--with-ssh-key-comment'] = comment if comment
      job = RemoteJob.new('openshift-origin-node', 'authorized-ssh-key-add', args)
      job
    end

    def get_remove_authorized_ssh_key_job(app, gear, ssh_key, comment=nil)
      args = Hash.new
      args['--with-app-uuid'] = app.uuid
      args['--with-container-uuid'] = gear.uuid
      args['--with-ssh-key'] = ssh_key
      args['--with-ssh-comment'] = comment if comment
      job = RemoteJob.new('openshift-origin-node', 'authorized-ssh-key-remove', args)
      job
    end

    def get_broker_auth_key_add_job(app, gear, iv, token)
      args = Hash.new
      args['--with-app-uuid'] = app.uuid
      args['--with-container-uuid'] = gear.uuid
      args['--with-iv'] = iv
      args['--with-token'] = token
      job = RemoteJob.new('openshift-origin-node', 'broker-auth-key-add', args)
      job
    end

    def get_broker_auth_key_remove_job(app, gear)
      args = Hash.new
      args['--with-app-uuid'] = app.uuid
      args['--with-container-uuid'] = gear.uuid
      job = RemoteJob.new('openshift-origin-node', 'broker-auth-key-remove', args)
      job
    end

    def get_execute_connector_job(app, gear, cart, connector_name, input_args)
      args = Hash.new
      args['--gear-uuid'] = gear.uuid
      args['--cart-name'] = cart
      args['--hook-name'] = connector_name
      args['--input-args'] = input_args.join(" ")
      job = RemoteJob.new('openshift-origin-node', 'connector-execute', args)
      job
    end

    def get_show_state_job(app, gear)
      args = Hash.new
      args['--with-app-uuid'] = app.uuid
      args['--with-container-uuid'] = gear.uuid
      job = RemoteJob.new('openshift-origin-node', 'app-state-show', args)
      job
    end

    def get_status_job(app, gear, cart)
      args = "'#{gear.name}' '#{app.domain.namespace}' '#{gear.uuid}'"
      job = RemoteJob.new(cart, 'status', args)
      job
    end

    def get_show_gear_quota_job(gear)
      args = Hash.new
      args['--uuid'] = gear.uuid
      job = RemoteJob.new('openshift-origin-node', 'get-quota', args)
      job
    end

    def get_update_gear_quota_job(gear, storage_in_gb, inodes)
      args = Hash.new
      args['--uuid'] = gear.uuid
      # quota command acts on 1K blocks
      args['--blocks'] = Integer(storage_in_gb * 1024 * 1024)
      args['--inodes'] = inodes unless inodes.to_s.empty?
      job = RemoteJob.new('openshift-origin-node', 'set-quota', args)
      job
    end
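    # The get_*_job helpers above build RemoteJob descriptors instead of
    # executing immediately, so the broker can fan one operation out across
    # every gear of an app in a single MCollective pass. A hedged sketch; the
    # parallel-execution entry point is assumed from the broker framework and
    # is not defined in this file.
    #
    #   jobs = app.gears.map { |gear| proxy.get_show_state_job(app, gear) }
    #   # ... hand the jobs to the broker's parallel-job machinery ...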

    def move_gear_post(app, gear, destination_container, state_map, keep_uid)
      reply = ResultIO.new
      source_container = gear.container
      gi = app.group_instance_map[gear.group_instance_name]
      app.start_order.each do |ci_name|
        next if not gi.component_instances.include? ci_name
        cinst = app.comp_instance_map[ci_name]
        cart = cinst.parent_cart_name
        next if cart == app.name
        idle, leave_stopped = state_map[ci_name]
        unless leave_stopped
          log_debug "DEBUG: Starting cartridge '#{cart}' in '#{app.name}' after move on #{destination_container.id}"
          reply.append destination_container.send(:run_cartridge_command, cart, app, gear, "start", nil, false)
        end
      end

      log_debug "DEBUG: Fixing DNS and mongo for gear '#{gear.name}' after move"
      log_debug "DEBUG: Changing server identity of '#{gear.name}' from '#{source_container.id}' to '#{destination_container.id}'"
      gear.server_identity = destination_container.id
      gear.container = destination_container
      if app.scalable and not gi.component_instances.find { |cart| cart.include? app.proxy_cartridge }
        dns = OpenShift::DnsService.instance
        begin
          public_hostname = destination_container.get_public_hostname
          dns.modify_application(gear.name, app.domain.namespace, public_hostname)
          dns.publish
        ensure
          dns.close
        end
      end

      if (not app.scalable) or (app.scalable and gi.component_instances.find { |cart| cart.include? app.proxy_cartridge })
        unless keep_uid
          unless app.aliases.nil?
            app.aliases.each do |server_alias|
              reply.append destination_container.add_alias(app, app.gear, server_alias)
            end
          end
        end
        app.recreate_dns
      end

      reply
    end

    def move_gear_pre(app, gear, state_map, keep_uid)
      reply = ResultIO.new
      source_container = gear.container
      gi = app.group_instance_map[gear.group_instance_name]
      app.start_order.reverse.each { |ci_name|
        next if not gi.component_instances.include? ci_name
        cinst = app.comp_instance_map[ci_name]
        cart = cinst.parent_cart_name
        next if cart == app.name
        idle, leave_stopped = state_map[ci_name]
        # stop the cartridge if it needs to
        unless leave_stopped
          log_debug "DEBUG: Stopping existing app cartridge '#{cart}' before moving"
          do_with_retry('stop') do
            reply.append source_container.stop(app, gear, cart)
          end
          if framework_carts.include? cart
            log_debug "DEBUG: Force stopping existing app cartridge '#{cart}' before moving"
            do_with_retry('force-stop') do
              reply.append source_container.force_stop(app, gear, cart)
            end
          end
        end
        # execute pre_move
        if embedded_carts.include? cart and not keep_uid
          if (app.scalable and not cart.include? app.proxy_cartridge) or not app.scalable
            log_debug "DEBUG: Performing cartridge level pre-move for embedded #{cart} for '#{app.name}' on #{source_container.id}"
            reply.append source_container.send(:run_cartridge_command, "embedded/" + cart, app, gear, "pre-move", nil, false)
          end
        end
      }
      reply
    end

    def move_gear(app, gear, destination_container, destination_district_uuid, allow_change_district, node_profile)
      reply = ResultIO.new
      state_map = {}
      gear.node_profile = node_profile if node_profile
      orig_uid = gear.uid

      # resolve destination_container according to district
      destination_container, destination_district_uuid, keep_uid = resolve_destination(app, gear, destination_container, destination_district_uuid, allow_change_district)

      source_container = gear.container
      destination_node_profile = destination_container.get_node_profile
      if app.scalable and source_container.get_node_profile != destination_node_profile
        log_debug "Cannot change node_profile for a gear belonging to a scalable application. The destination container's node profile is #{destination_node_profile}, while the gear's node_profile is #{gear.node_profile}"
        raise OpenShift::UserException.new("Error moving app. Cannot change node profile.", 1)
      end

      # get the state of all cartridges
      quota_blocks = nil
      quota_files = nil
      idle, leave_stopped, quota_blocks, quota_files = get_app_status(app)
      gi = app.group_instance_map[gear.group_instance_name]
      gi.component_instances.each do |ci_name|
        cinst = app.comp_instance_map[ci_name]
        cart = cinst.parent_cart_name
        next if cart == app.name
        # idle, leave_stopped, quota_blocks, quota_files = get_cart_status(app, gear, cart)
        state_map[ci_name] = [idle, leave_stopped]
      end

      begin
        # pre-move
        reply.append move_gear_pre(app, gear, state_map, keep_uid)

        unless keep_uid
          gear.uid = destination_container.reserve_uid(destination_district_uuid)
          log_debug "DEBUG: Reserved uid '#{gear.uid}' on district: '#{destination_district_uuid}'"
        end
        begin
          # rsync gear with destination container
          rsync_destination_container(app, gear, destination_container, destination_district_uuid, quota_blocks, quota_files, orig_uid, keep_uid)

          # now execute 'move'/'expose-port' hooks on the new nest of the components
          app.configure_order.each do |ci_name|
            next if not gi.component_instances.include?(ci_name)
            cinst = app.comp_instance_map[ci_name]
            cart = cinst.parent_cart_name
            next if cart == app.name
            idle, leave_stopped = state_map[ci_name]
            if keep_uid
              if framework_carts.include?(cart)
                log_debug "DEBUG: Restarting httpd proxy for '#{cart}' on #{destination_container.id}"
                reply.append destination_container.send(:run_cartridge_command, 'abstract', app, gear, "restart-httpd-proxy", nil, false)
              end
            else
              if embedded_carts.include?(cart)
                if app.scalable and cart.include? app.proxy_cartridge
                  log_debug "DEBUG: Performing cartridge level move for '#{cart}' on #{destination_container.id}"
                  reply.append destination_container.send(:run_cartridge_command, cart, app, gear, "move", idle ? '--idle' : nil, false)
                else
                  log_debug "DEBUG: Performing cartridge level move for embedded #{cart} for '#{app.name}' on #{destination_container.id}"
                  embedded_reply = destination_container.send(:run_cartridge_command, "embedded/" + cart, app, gear, "move", nil, false)
                  component_details = embedded_reply.appInfoIO.string
                  unless component_details.empty?
                    app.set_embedded_cart_info(cart, component_details)
                  end
                  reply.append embedded_reply
                  log_debug "DEBUG: Performing cartridge level post-move for embedded #{cart} for '#{app.name}' on #{destination_container.id}"
                  reply.append destination_container.send(:run_cartridge_command, "embedded/" + cart, app, gear, "post-move", nil, false)
                end
              end
              if framework_carts.include?(cart)
                log_debug "DEBUG: Performing cartridge level move for '#{cart}' on #{destination_container.id}"
                reply.append destination_container.send(:run_cartridge_command, cart, app, gear, "move", idle ? '--idle' : nil, false)
              end
            end
            if app.scalable and not cart.include? app.proxy_cartridge
              begin
                reply.append destination_container.expose_port(app, gear, cinst.parent_cart_name)
              rescue Exception => e
                # just pass because some embedded cartridges do not have expose-port hook implemented (e.g. jenkins-client)
              end
            end
          end

          # start the gears again and change DNS entry
          reply.append move_gear_post(app, gear, destination_container, state_map, keep_uid)
          app.elaborate_descriptor
          app.execute_connections
          if app.scalable
            # execute connections restart the haproxy service, so stop it explicitly if needed
            app.start_order.reverse.each do |ci_name|
              next if not gi.component_instances.include? ci_name
              cinst = app.comp_instance_map[ci_name]
              cart = cinst.parent_cart_name
              next if cart == app.name
              idle, leave_stopped = state_map[ci_name]
              if leave_stopped and cart.include? app.proxy_cartridge
                log_debug "DEBUG: Explicitly stopping cartridge '#{cart}' in '#{app.name}' after move on #{destination_container.id}"
                reply.append destination_container.stop(app, gear, cart)
              end
            end
          end
          if gear.node_profile != destination_node_profile
            log_debug "DEBUG: The gear's node profile changed from #{gear.node_profile} to #{destination_node_profile}"
            gear.node_profile = destination_node_profile
            if not app.scalable
              app.node_profile = destination_node_profile
              gi.node_profile = destination_node_profile
            end
          end
          app.save

        rescue Exception => e
          gear.container = source_container
          # remove-httpd-proxy of destination
          log_debug "DEBUG: Moving failed. Rolling back gear '#{gear.name}' '#{app.name}' with remove-httpd-proxy on '#{destination_container.id}'"
          gi.component_instances.each do |ci_name|
            cinst = app.comp_instance_map[ci_name]
            cart = cinst.parent_cart_name
            next if cart == app.name
            if framework_carts.include? cart
              begin
                reply.append destination_container.send(:run_cartridge_command, cart, app, gear, "remove-httpd-proxy", nil, false)
              rescue Exception => e
                log_debug "DEBUG: Remove httpd proxy with cart '#{cart}' failed on '#{destination_container.id}' - gear: '#{gear.name}', app: '#{app.name}'"
              end
            end
          end
          # destroy destination
          log_debug "DEBUG: Moving failed. Rolling back gear '#{gear.name}' in '#{app.name}' with destroy on '#{destination_container.id}'"
          reply.append destination_container.destroy(app, gear, keep_uid, nil, true)
          raise
        end
      rescue Exception => e
        begin
          unless keep_uid
            # post_move source
            gi.component_instances.each do |ci_name|
              cinst = app.comp_instance_map[ci_name]
              cart = cinst.parent_cart_name
              next if cart == app.name
              proxy_cart = (app.proxy_cartridge or "")
              if embedded_carts.include? cart and not cart.include? proxy_cart
                begin
                  log_debug "DEBUG: Performing cartridge level post-move for embedded #{cart} for '#{app.name}' on #{source_container.id}"
                  reply.append source_container.send(:run_cartridge_command, "embedded/" + cart, app, gear, "post-move", nil, false)
                rescue Exception => e
                  log_error "ERROR: Error performing cartridge level post-move for embedded #{cart} for '#{app.name}' on #{source_container.id}: #{e.message}"
                end
              end
            end
          end
          # start source
          gi.component_instances.each do |ci_name|
            cinst = app.comp_instance_map[ci_name]
            cart = cinst.parent_cart_name
            next if cart == app.name
            idle, leave_stopped = state_map[ci_name]
            if not leave_stopped
              reply.append source_container.run_cartridge_command(cart, app, gear, "start", nil, false) if framework_carts.include? cart
            end
          end
        ensure
          raise
        end
      end

      move_gear_destroy_old(app, gear, keep_uid, orig_uid, source_container, destination_container)

      log_debug "Successfully moved '#{app.name}' with gear uuid '#{gear.uuid}' from '#{source_container.id}' to '#{destination_container.id}'"
      reply
    end

    def move_gear_destroy_old(app, gear, keep_uid, orig_uid, source_container, destination_container)
      reply = ResultIO.new
      log_debug "DEBUG: Deconfiguring old app '#{app.name}' on #{source_container.id} after move"
      begin
        reply.append source_container.destroy(app, gear, keep_uid, orig_uid, true)
      rescue Exception => e
        log_debug "DEBUG: The application '#{app.name}' with gear uuid '#{gear.uuid}' is now moved to '#{destination_container.id}' but not completely deconfigured from '#{source_container.id}'"
        raise
      end
      reply
    end

    def resolve_destination(app, gear, destination_container, destination_district_uuid, allow_change_district)
      source_container = gear.container
      source_container = gear.get_proxy if source_container.nil?
      source_district_uuid = source_container.get_district_uuid
      if destination_container.nil?
        unless allow_change_district
          if destination_district_uuid && destination_district_uuid != source_district_uuid
            raise OpenShift::UserException.new("Error moving app. Cannot change district from '#{source_district_uuid}' to '#{destination_district_uuid}' without allow_change_district flag.", 1)
          else
            destination_district_uuid = source_district_uuid unless source_district_uuid == 'NONE'
          end
        end
        destination_container = MCollectiveApplicationContainerProxy.find_available_impl(gear.node_profile, destination_district_uuid)
        log_debug "DEBUG: Destination container: #{destination_container.id}"
        destination_district_uuid = destination_container.get_district_uuid
      else
        if destination_district_uuid
          log_debug "DEBUG: Destination district uuid '#{destination_district_uuid}' is being ignored in favor of destination container #{destination_container.id}"
        end
        destination_district_uuid = destination_container.get_district_uuid
        unless allow_change_district || (source_district_uuid == destination_district_uuid)
          raise OpenShift::UserException.new("Resulting move would change districts from '#{source_district_uuid}' to '#{destination_district_uuid}'. You can use the 'allow_change_district' option if you really want this to happen.", 1)
        end
      end

      log_debug "DEBUG: Source district uuid: #{source_district_uuid}"
      log_debug "DEBUG: Destination district uuid: #{destination_district_uuid}"
      keep_uid = destination_district_uuid == source_district_uuid && destination_district_uuid && destination_district_uuid != 'NONE'
      log_debug "DEBUG: District unchanged keeping uid" if keep_uid

      if source_container.id == destination_container.id
        raise OpenShift::UserException.new("Error moving app. Old and new servers are the same: #{source_container.id}", 1)
      end
      return [destination_container, destination_district_uuid, keep_uid]
    end
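    # The keep_uid rule computed above, reduced to plain Ruby: a gear keeps
    # its UID across a move only when source and destination share a real
    # district (illustration only).
    #
    #   def keep_uid?(src, dst)
    #     dst == src && !dst.nil? && dst != 'NONE'
    #   end
    #   keep_uid?('d1', 'd1')      # => true
    #   keep_uid?('NONE', 'NONE')  # => false
    #   keep_uid?('d1', 'd2')      # => false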

    def rsync_destination_container(app, gear, destination_container, destination_district_uuid, quota_blocks, quota_files, orig_uid, keep_uid)
      reply = ResultIO.new
      source_container = gear.container
      log_debug "DEBUG: Creating new account for gear '#{gear.name}' on #{destination_container.id}"
      reply.append destination_container.create(app, gear, quota_blocks, quota_files)

      log_debug "DEBUG: Moving content for app '#{app.name}', gear '#{gear.name}' to #{destination_container.id}"
      rsync_keyfile = Rails.configuration.auth[:rsync_keyfile]
      log_debug `eval \`ssh-agent\`; ssh-add #{rsync_keyfile}; ssh -o StrictHostKeyChecking=no -A root@#{source_container.get_ip_address} "rsync -aA#{(gear.uid && gear.uid == orig_uid) ? 'X' : ''} -e 'ssh -o StrictHostKeyChecking=no' /var/lib/openshift/#{gear.uuid}/ root@#{destination_container.get_ip_address}:/var/lib/openshift/#{gear.uuid}/"; exit_code=$?; ssh-agent -k; exit $exit_code`
      if $?.exitstatus != 0
        raise OpenShift::NodeException.new("Error moving app '#{app.name}', gear '#{gear.name}' from #{source_container.id} to #{destination_container.id}", 143)
      end

      if keep_uid
        log_debug "DEBUG: Moving system components for app '#{app.name}', gear '#{gear.name}' to #{destination_container.id}"
        log_debug `eval \`ssh-agent\`; ssh-add #{rsync_keyfile}; ssh -o StrictHostKeyChecking=no -A root@#{source_container.get_ip_address} "rsync -aAX -e 'ssh -o StrictHostKeyChecking=no' --include '.httpd.d/' --include '.httpd.d/#{gear.uuid}_***' --include '#{app.name}-#{app.domain.namespace}' --include '.last_access/' --include '.last_access/#{gear.uuid}' --exclude '*' /var/lib/openshift/ root@#{destination_container.get_ip_address}:/var/lib/openshift/"; exit_code=$?; ssh-agent -k; exit $exit_code`
        if $?.exitstatus != 0
          raise OpenShift::NodeException.new("Error moving system components for app '#{app.name}', gear '#{gear.name}' from #{source_container.id} to #{destination_container.id}", 143)
        end
      end
      reply
    end
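    # Note the rsync flags above: extended attributes (-X) are copied only
    # when the gear keeps its original UID, as in this reduced form of the
    # ternary (illustration only):
    #
    #   flags = "-aA#{(gear_uid && gear_uid == orig_uid) ? 'X' : ''}"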

    def get_app_status(app)
      get_cart_status(app, app.gear, app.framework)
    end

    def get_cart_status(app, gear, cart_name)
      reply = ResultIO.new
      source_container = gear.container
      leave_stopped = false
      idle = false
      quota_blocks = nil
      quota_files = nil
      log_debug "DEBUG: Getting existing app '#{app.name}' status before moving"
      do_with_retry('status') do
        result = source_container.status(app, gear, cart_name)
        result.cart_commands.each do |command_item|
          case command_item[:command]
          when "ATTR"
            key = command_item[:args][0]
            value = command_item[:args][1]
            if key == 'status'
              case value
              when "ALREADY_STOPPED"
                leave_stopped = true
              when "ALREADY_IDLED"
                leave_stopped = true
                idle = true
              end
            elsif key == 'quota_blocks'
              quota_blocks = value
            elsif key == 'quota_files'
              quota_files = value
            end
          end
          reply.append result
        end
      end

      if idle
        log_debug "DEBUG: Gear component '#{cart_name}' was idle"
      elsif leave_stopped
        log_debug "DEBUG: Gear component '#{cart_name}' was stopped"
      else
        log_debug "DEBUG: Gear component '#{cart_name}' was running"
      end

      return [idle, leave_stopped, quota_blocks, quota_files]
    end

    #
    # Execute an RPC call for the specified agent.
    # If a server is supplied, only execute for that server.
    #
    def self.rpc_exec(agent, server=nil, forceRediscovery=false, options=rpc_options)

      # Setup the rpc client
      rpc_client = rpcclient(agent, :options => options)

      # Filter to the specified server
      if server
        Rails.logger.debug("DEBUG: rpc_exec: Filtering rpc_exec to server #{server}")
        rpc_client.identity_filter(server)
      end

      if forceRediscovery
        rpc_client.reset
      end
      Rails.logger.debug("DEBUG: rpc_exec: rpc_client=#{rpc_client}")

      # Execute a block and make sure we disconnect the client
      begin
        result = yield rpc_client
      ensure
        rpc_client.disconnect
      end

      raise OpenShift::NodeException.new("Node execution failure (error getting result from node). If the problem persists please contact Red Hat support.", 143) unless result

      result
    end
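    # rpc_exec usage sketch: run a block against one node and let the helper
    # disconnect the client afterwards. The agent/action pair below is
    # MCollective's stock rpcutil#ping; the hostname is hypothetical.
    #
    #   MCollectiveApplicationContainerProxy.rpc_exec('rpcutil', 'node1.example.com') do |client|
    #     client.ping
    #   end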

    def set_district(uuid, active)
      mc_args = { :uuid => uuid,
                  :active => active }
      rpc_client = rpc_exec_direct('openshift')
      result = nil
      begin
        Rails.logger.debug "DEBUG: rpc_client.custom_request('set_district', #{mc_args.inspect}, #{@id}, {'identity' => #{@id}})"
        result = rpc_client.custom_request('set_district', mc_args, @id, {'identity' => @id})
        Rails.logger.debug "DEBUG: #{result.inspect}"
      ensure
        rpc_client.disconnect
      end
      Rails.logger.debug result.inspect
      result
    end

    protected

    def do_with_retry(action, num_tries=2)
      (1..num_tries).each do |i|
        begin
          yield
          if (i > 1)
            log_debug "DEBUG: Action '#{action}' succeeded on try #{i}. You can ignore previous error messages or following mcollective debug related to '#{action}'"
          end
          break
        rescue Exception => e
          log_debug "DEBUG: Error performing #{action} on existing app on try #{i}: #{e.message}"
          raise if i == num_tries
        end
      end
    end

    def framework_carts
      @framework_carts ||= CartridgeCache.cartridge_names('standalone')
    end

    def embedded_carts
      @embedded_carts ||= CartridgeCache.cartridge_names('embedded')
    end

    def add_component(app, gear, component)
      reply = ResultIO.new
      begin
        reply.append run_cartridge_command('embedded/' + component, app, gear, 'configure')
      rescue Exception => e
        begin
          Rails.logger.debug "DEBUG: Failed to embed '#{component}' in '#{app.name}' for user '#{app.user.login}'"
          reply.debugIO << "Failed to embed '#{component}' in '#{app.name}'"
          reply.append run_cartridge_command('embedded/' + component, app, gear, 'deconfigure')
        ensure
          raise
        end
      end

      component_details = reply.appInfoIO.string.empty? ? '' : reply.appInfoIO.string
      reply.debugIO << "Embedded app details: #{component_details}"
      [reply, component_details]
    end

    def remove_component(app, gear, component)
      Rails.logger.debug "DEBUG: Deconfiguring embedded application '#{component}' in application '#{app.name}' on node '#{@id}'"
      return run_cartridge_command('embedded/' + component, app, gear, 'deconfigure')
    end

    def start_component(app, gear, component)
      run_cartridge_command('embedded/' + component, app, gear, "start")
    end

    def stop_component(app, gear, component)
      run_cartridge_command('embedded/' + component, app, gear, "stop")
    end

    def restart_component(app, gear, component)
      run_cartridge_command('embedded/' + component, app, gear, "restart")
    end

    def reload_component(app, gear, component)
      run_cartridge_command('embedded/' + component, app, gear, "reload")
    end

    def component_status(app, gear, component)
      run_cartridge_command('embedded/' + component, app, gear, "status")
    end

    def log_debug(message)
      Rails.logger.debug message
      puts message
    end

    def log_error(message)
      Rails.logger.error message
      puts message
    end

    def execute_direct(cartridge, action, args, log_debug_output=true)
      mc_args = { :cartridge => cartridge,
                  :action => action,
                  :args => args }

      rpc_client = rpc_exec_direct('openshift')
      result = nil
      begin
        Rails.logger.debug "DEBUG: rpc_client.custom_request('cartridge_do', #{mc_args.inspect}, #{@id}, {'identity' => #{@id}})"
        result = rpc_client.custom_request('cartridge_do', mc_args, @id, {'identity' => @id})
        Rails.logger.debug "DEBUG: #{result.inspect}" if log_debug_output
      ensure
        rpc_client.disconnect
      end
      result
    end

    def parse_result(mcoll_reply, app=nil, command=nil)
      mcoll_result = mcoll_reply[0]
      output = nil
      if (mcoll_result && (defined? mcoll_result.results) && !mcoll_result.results[:data].nil?)
        output = mcoll_result.results[:data][:output]
        exitcode = mcoll_result.results[:data][:exitcode]
      else
        server_identity = app ? MCollectiveApplicationContainerProxy.find_app(app.uuid, app.name) : nil
        if server_identity && @id != server_identity
          raise OpenShift::InvalidNodeException.new("Node execution failure (invalid node). If the problem persists please contact Red Hat support.", 143, nil, server_identity)
        else
          raise OpenShift::NodeException.new("Node execution failure (error getting result from node). If the problem persists please contact Red Hat support.", 143)
        end
      end

      result = MCollectiveApplicationContainerProxy.sanitize_result(output, exitcode)
      #result.exitcode = exitcode
      result
    end
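    # Every node operation funnels through the single MCollective action
    # 'cartridge_do' with a cartridge/action/args payload, as execute_direct
    # shows above. A hedged raw-client equivalent (hostname and UUID values
    # are hypothetical):
    #
    #   client = rpcclient('openshift')
    #   client.custom_request('cartridge_do',
    #                         { :cartridge => 'openshift-origin-node',
    #                           :action    => 'app-state-show',
    #                           :args      => { '--with-app-uuid'       => 'abc123',
    #                                           '--with-container-uuid' => 'abc123' } },
    #                         'node1.example.com',
    #                         { 'identity' => 'node1.example.com' })
    #   client.disconnect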
1111
|
+
|
1112
|
+
#
|
1113
|
+
# Returns the server identity of the specified app
|
1114
|
+
#
|
1115
|
+
def self.find_app(app_uuid, app_name)
|
1116
|
+
server_identity = nil
|
1117
|
+
rpc_exec('openshift') do |client|
|
1118
|
+
client.has_app(:uuid => app_uuid,
|
1119
|
+
:application => app_name) do |response|
|
1120
|
+
output = response[:body][:data][:output]
|
1121
|
+
if output == true
|
1122
|
+
server_identity = response[:senderid]
|
1123
|
+
end
|
1124
|
+
end
|
1125
|
+
end
|
1126
|
+
return server_identity
|
1127
|
+
end
|
1128
|
+
|
1129
|
+
#
|
1130
|
+
# Returns whether this server has the specified app
|
1131
|
+
#
|
1132
|
+
def has_app?(app_uuid, app_name)
|
1133
|
+
MCollectiveApplicationContainerProxy.rpc_exec('openshift', @id) do |client|
|
1134
|
+
client.has_app(:uuid => app_uuid,
|
1135
|
+
:application => app_name) do |response|
|
1136
|
+
output = response[:body][:data][:output]
|
1137
|
+
return output == true
|
1138
|
+
end
|
1139
|
+
end
|
1140
|
+
end
|
1141
|
+
|
1142
|
+
#
|
1143
|
+
# Returns whether this server has the specified embedded app
|
1144
|
+
#
|
1145
|
+
def has_embedded_app?(app_uuid, embedded_type)
|
1146
|
+
MCollectiveApplicationContainerProxy.rpc_exec('openshift', @id) do |client|
|
1147
|
+
client.has_embedded_app(:uuid => app_uuid,
|
1148
|
+
:embedded_type => embedded_type) do |response|
|
1149
|
+
output = response[:body][:data][:output]
|
1150
|
+
return output == true
|
1151
|
+
end
|
1152
|
+
end
|
1153
|
+
end
|
1154
|
+
|
1155
|
+
#
|
1156
|
+
# Returns whether this server has already reserved the specified uid as a uid or gid
|
1157
|
+
#
|
1158
|
+
def has_uid_or_gid?(uid)
|
1159
|
+
MCollectiveApplicationContainerProxy.rpc_exec('openshift', @id) do |client|
|
1160
|
+
client.has_uid_or_gid(:uid => uid.to_s) do |response|
|
1161
|
+
output = response[:body][:data][:output]
|
1162
|
+
return output == true
|
1163
|
+
end
|
1164
|
+
end
|
1165
|
+
end
|
1166
|
+
|
1167
|
    def run_cartridge_command(framework, app, gear, command, arg=nil, allow_move=true)

      arguments = "'#{gear.name}' '#{app.domain.namespace}' '#{gear.uuid}'"
      arguments += " '#{arg}'" if arg

      result = execute_direct(framework, command, arguments)
      begin
        resultIO = parse_result(result, app, command)
      rescue OpenShift::InvalidNodeException => e
        if command != 'configure' && allow_move
          @id = e.server_identity
          Rails.logger.debug "DEBUG: Changing server identity of '#{gear.name}' from '#{gear.server_identity}' to '#{@id}'"
          dns_service = OpenShift::DnsService.instance
          dns_service.modify_application(gear.name, app.domain.namespace, get_public_hostname)
          dns_service.publish
          gear.server_identity = @id
          app.save
          #retry
          result = execute_direct(framework, command, arguments)
          resultIO = parse_result(result, app, command)
        else
          raise
        end
      end
      Rails.logger.debug "DEBUG: Cartridge command #{framework}::#{command} exitcode = #{resultIO.exitcode}"
      if resultIO.exitcode != 0
        resultIO.debugIO << "Cartridge return code: " + resultIO.exitcode.to_s
        begin
          if resultIO.hasUserActionableError
            raise OpenShift::UserException.new(resultIO.errorIO.string, resultIO.exitcode)
          else
            raise OpenShift::NodeException.new("Node execution failure (invalid exit code from node). If the problem persists please contact Red Hat support.", 143, resultIO)
          end
        rescue OpenShift::NodeException => e
          if command == 'deconfigure'
            if framework.start_with?('embedded/')
              if has_embedded_app?(app.uuid, framework[9..-1])
                raise
              else
                Rails.logger.debug "DEBUG: Component '#{framework}' in application '#{app.name}' not found on node '#{@id}'. Continuing with deconfigure."
              end
            else
              if has_app?(app.uuid, app.name)
                raise
              else
                Rails.logger.debug "DEBUG: Application '#{app.name}' not found on node '#{@id}'. Continuing with deconfigure."
              end
            end
          else
            raise
          end
        end
      end
      resultIO
    end

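    # ---- Editor's sketch (not part of the gem) ----------------------------
    # The rescue above implements a relocate-and-retry pattern: when a command
    # lands on the wrong node, InvalidNodeException carries the correct server
    # identity, the proxy re-points itself (and DNS), then runs the command
    # exactly once more. A standalone, runnable distillation; the exception
    # class and hostnames are stand-ins, not the gem's real types.
    #
    #   class StaleNodeError < StandardError
    #     attr_reader :server_identity
    #     def initialize(server_identity)
    #       super("wrong node")
    #       @server_identity = server_identity
    #     end
    #   end
    #
    #   current_node = 'node1.example.com'
    #   run = lambda do
    #     # Pretend the gear actually lives on node2 until we re-point.
    #     raise StaleNodeError.new('node2.example.com') unless current_node == 'node2.example.com'
    #     'ok'
    #   end
    #
    #   begin
    #     result = run.call
    #   rescue StaleNodeError => e
    #     current_node = e.server_identity  # adopt the node that hosts the gear
    #     result = run.call                 # single retry, as in the code above
    #   end
    #   result  # => "ok"
    # ------------------------------------------------------------------------
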
    def self.rpc_find_available(node_profile=nil, district_uuid=nil, require_specific_district=false, forceRediscovery=false)
      current_server, current_capacity = nil, nil
      additional_filters = [{:fact => "active_capacity",
                             :value => '100',
                             :operator => "<"}]

      district_uuid = nil if district_uuid == 'NONE'

      if Rails.configuration.msg_broker[:node_profile_enabled]
        if node_profile
          additional_filters.push({:fact => "node_profile",
                                   :value => node_profile,
                                   :operator => "=="})
        end
      end

      if district_uuid
        additional_filters.push({:fact => "district_uuid",
                                 :value => district_uuid,
                                 :operator => "=="})
        additional_filters.push({:fact => "district_active",
                                 :value => true.to_s,
                                 :operator => "=="})
      else
        #TODO how do you filter on a fact not being set
        additional_filters.push({:fact => "district_uuid",
                                 :value => "NONE",
                                 :operator => "=="})
      end

      rpc_opts = nil
      unless forceRediscovery
        rpc_opts = rpc_options
        rpc_opts[:disctimeout] = 1
      end

      server_infos = []
      rpc_get_fact('active_capacity', nil, forceRediscovery, additional_filters, rpc_opts) do |server, capacity|
        #Rails.logger.debug "Next server: #{server} active capacity: #{capacity}"
        server_infos << [server, capacity.to_f]
      end

      if !server_infos.empty?
        # Pick a random node amongst the best choices available
        server_infos = server_infos.sort_by { |server_info| server_info[1] }
        if server_infos.first[1] < 80
          # If any server is < 80 then only pick from servers with < 80
          server_infos.delete_if { |server_info| server_info[1] >= 80 }
        end
        max_index = [server_infos.length, 4].min - 1
        server_infos = server_infos.first(max_index + 1)
        # Weight the servers with the most room (lowest active_capacity) the highest
        (0..max_index).each do |i|
          (max_index - i).times do
            server_infos << server_infos[i]
          end
        end
      elsif district_uuid && !require_specific_district
        # A specific district was requested (probably the most available one),
        # but it has no available nodes. Fall back to a best-available
        # algorithm: find the most available nodes and match them to their
        # districts, drop the nearly full nodes if possible, and return a node
        # within a district that has plenty of space.
        additional_filters = [{:fact => "active_capacity",
                               :value => '100',
                               :operator => "<"},
                              {:fact => "district_active",
                               :value => true.to_s,
                               :operator => "=="},
                              {:fact => "district_uuid",
                               :value => "NONE",
                               :operator => "!="}]

        if Rails.configuration.msg_broker[:node_profile_enabled]
          if node_profile
            additional_filters.push({:fact => "node_profile",
                                     :value => node_profile,
                                     :operator => "=="})
          end
        end

        rpc_opts = nil
        unless forceRediscovery
          rpc_opts = rpc_options
          rpc_opts[:disctimeout] = 1
        end
        districts = District.find_all # candidate for caching
        rpc_get_fact('active_capacity', nil, forceRediscovery, additional_filters, rpc_opts) do |server, capacity|
          districts.each do |district|
            if district.server_identities.has_key?(server)
              server_infos << [server, capacity.to_f, district]
              break
            end
          end
        end
        unless server_infos.empty?
          server_infos = server_infos.sort_by { |server_info| server_info[1] }
          if server_infos.first[1] < 80
            server_infos.delete_if { |server_info| server_info[1] >= 80 }
          end
          server_infos = server_infos.sort_by { |server_info| server_info[2].available_capacity }
          server_infos = server_infos.first(8)
        end
      end
      current_district = nil
      unless server_infos.empty?
        server_info = server_infos[rand(server_infos.length)]
        current_server = server_info[0]
        current_capacity = server_info[1]
        current_district = server_info[2]
        Rails.logger.debug "Current server: #{current_server} active capacity: #{current_capacity}"
      end

      return current_server, current_capacity, current_district
    end

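    # ---- Editor's sketch (not part of the gem) ----------------------------
    # The selection math above, isolated and runnable with made-up capacities:
    # nodes sort by active_capacity (lower = more room), nodes at >= 80 are
    # dropped when a better one exists, at most four candidates survive, and
    # duplication weights the pool so the emptiest node is the likeliest pick.
    #
    #   server_infos = [['n1', 35.0], ['n2', 90.0], ['n3', 10.0], ['n4', 60.0], ['n5', 79.0]]
    #   server_infos = server_infos.sort_by { |_, capacity| capacity }
    #   server_infos.delete_if { |_, capacity| capacity >= 80 } if server_infos.first[1] < 80
    #   max_index = [server_infos.length, 4].min - 1
    #   server_infos = server_infos.first(max_index + 1)
    #   (0..max_index).each { |i| (max_index - i).times { server_infos << server_infos[i] } }
    #   # Pool is now n3 x4, n1 x3, n4 x2, n5 x1 (10 entries), so n3 is picked
    #   # with probability 4/10 and n5 with only 1/10.
    #   picked = server_infos[rand(server_infos.length)]
    # ------------------------------------------------------------------------
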
    def self.rpc_find_one(node_profile=nil)
      current_server = nil
      additional_filters = []

      if Rails.configuration.msg_broker[:node_profile_enabled]
        if node_profile
          additional_filters.push({:fact => "node_profile",
                                   :value => node_profile,
                                   :operator => "=="})
        end
      end

      options = rpc_options
      options[:filter]['fact'] = options[:filter]['fact'] + additional_filters
      options[:mcollective_limit_targets] = "1"

      rpc_client = rpcclient('rpcutil', :options => options)
      begin
        rpc_client.get_fact(:fact => 'public_hostname') do |response|
          raise OpenShift::NodeException.new("No nodes found. If the problem persists please contact Red Hat support.", 140) unless Integer(response[:body][:statuscode]) == 0
          current_server = response[:senderid]
        end
      ensure
        rpc_client.disconnect
      end
      return current_server
    end

    def self.rpc_options
      # Make a deep copy of the default options
      Marshal::load(Marshal::dump(Rails.configuration.msg_broker[:rpc_options]))
    end

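    # ---- Editor's sketch (not part of the gem) ----------------------------
    # Why a Marshal round-trip instead of dup: the rpc options carry nested
    # filter arrays, and dup copies only the outer hash, so mutating a shallow
    # copy's filters would corrupt the shared defaults. Standalone and
    # runnable; the option layout below is a made-up stand-in.
    #
    #   defaults = { :filter => { 'fact' => [{ :fact => 'active_capacity' }] }, :disctimeout => 2 }
    #
    #   shallow = defaults.dup
    #   shallow[:filter]['fact'] << { :fact => 'node_profile' }
    #   defaults[:filter]['fact'].length  # => 2 -- the defaults were mutated!
    #
    #   defaults[:filter]['fact'].pop     # undo for the demo
    #   deep = Marshal.load(Marshal.dump(defaults))
    #   deep[:filter]['fact'] << { :fact => 'node_profile' }
    #   defaults[:filter]['fact'].length  # => 1 -- defaults untouched
    # ------------------------------------------------------------------------
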
    #
    # Return the value of the MCollective response
    # for both a single result and a multiple result
    # structure
    #
    def self.rvalue(response)
      result = nil

      if response[:body]
        result = response[:body][:data][:value]
      elsif response[:data]
        result = response[:data][:value]
      end

      result
    end

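    # ---- Editor's sketch (not part of the gem) ----------------------------
    # rvalue accepts both reply shapes seen from MCollective -- a full
    # response with a :body wrapper and a bare :data hash -- and digs :value
    # out of either. A runnable illustration with a made-up fact value:
    #
    #   full = { :body => { :data => { :value => 'small' } } }
    #   bare = { :data => { :value => 'small' } }
    #   [full, bare].map do |response|
    #     if response[:body]
    #       response[:body][:data][:value]
    #     elsif response[:data]
    #       response[:data][:value]
    #     end
    #   end
    #   # => ["small", "small"]
    # ------------------------------------------------------------------------
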
    def rsuccess(response)
      response[:body][:statuscode].to_i == 0
    end

    #
    # Returns the fact value from the specified server.
    # Yields to the supplied block if there is a non-nil
    # value for the fact.
    #
    def self.rpc_get_fact(fact, server=nil, forceRediscovery=false, additional_filters=nil, custom_rpc_opts=nil)
      result = nil
      options = custom_rpc_opts ? custom_rpc_opts : rpc_options
      options[:filter]['fact'] = options[:filter]['fact'] + additional_filters if additional_filters

      Rails.logger.debug("DEBUG: rpc_get_fact: fact=#{fact}")
      rpc_exec('rpcutil', server, forceRediscovery, options) do |client|
        client.get_fact(:fact => fact) do |response|
          next unless Integer(response[:body][:statuscode]) == 0

          # Yield the server and the value to the block
          result = rvalue(response)
          yield response[:senderid], result if result
        end
      end

      result
    end

    #
    # Given a known fact and node, get a single fact directly.
    # This is significantly faster than the get_facts method.
    # If multiple nodes of the same name exist, it will pick just one.
    #
    def rpc_get_fact_direct(fact)
      options = MCollectiveApplicationContainerProxy.rpc_options

      rpc_client = rpcclient("rpcutil", :options => options)
      begin
        result = rpc_client.custom_request('get_fact', {:fact => fact}, @id, {'identity' => @id})[0]
        if (result && defined? result.results && result.results.has_key?(:data))
          value = result.results[:data][:value]
        else
          raise OpenShift::NodeException.new("Node execution failure (error getting fact). If the problem persists please contact Red Hat support.", 143)
        end
      ensure
        rpc_client.disconnect
      end

      return value
    end

    #
    # Execute an rpc call directly against a node.
    # If more than one node exists, just pick one.
    def rpc_exec_direct(agent)
      options = MCollectiveApplicationContainerProxy.rpc_options
      rpc_client = rpcclient(agent, :options => options)
      Rails.logger.debug("DEBUG: rpc_exec_direct: rpc_client=#{rpc_client}")
      rpc_client
    end

    def self.get_all_gears_impl
      gear_map = {}
      sender_map = {}
      rpc_exec('openshift') do |client|
        client.get_all_gears() do |response|
          if response[:body][:statuscode] == 0
            sub_gear_map = response[:body][:data][:output]
            sender = response[:senderid]
            sub_gear_map.each { |k,v|
              gear_map[k] = [sender, Integer(v)]
              sender_map[sender] = {} if not sender_map.has_key? sender
              sender_map[sender][Integer(v)] = k
            }
          end
        end
      end
      return [gear_map, sender_map]
    end

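    # ---- Editor's sketch (not part of the gem) ----------------------------
    # The two indexes get_all_gears_impl builds, shown standalone with a
    # made-up node reply: gear_map maps gear uuid => [node, uid], while
    # sender_map maps node => {uid => uuid} for the reverse lookup.
    #
    #   gear_map = {}
    #   sender_map = {}
    #   replies = { 'node1.example.com' => { 'uuid-a' => '501', 'uuid-b' => '502' } }
    #   replies.each do |sender, sub_gear_map|
    #     sub_gear_map.each do |uuid, uid|
    #       gear_map[uuid] = [sender, Integer(uid)]
    #       sender_map[sender] ||= {}
    #       sender_map[sender][Integer(uid)] = uuid
    #     end
    #   end
    #   # gear_map   => {"uuid-a"=>["node1.example.com", 501], "uuid-b"=>["node1.example.com", 502]}
    #   # sender_map => {"node1.example.com"=>{501=>"uuid-a", 502=>"uuid-b"}}
    # ------------------------------------------------------------------------
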
    def self.get_all_active_gears_impl
      active_gears_map = {}
      rpc_exec('openshift') do |client|
        client.get_all_active_gears() do |response|
          if response[:body][:statuscode] == 0
            active_gears = response[:body][:data][:output]
            sender = response[:senderid]
            active_gears_map[sender] = active_gears
          end
        end
      end
      active_gears_map
    end

    def self.sanitize_result(output, exitcode=0)
      result = ResultIO.new
      result.exitcode = exitcode

      if output && !output.empty?
        output.each_line do |line|
          if line =~ /^CLIENT_(MESSAGE|RESULT|DEBUG|ERROR|INTERNAL_ERROR): /
            if line =~ /^CLIENT_MESSAGE: /
              result.messageIO << line['CLIENT_MESSAGE: '.length..-1]
            elsif line =~ /^CLIENT_RESULT: /
              result.resultIO << line['CLIENT_RESULT: '.length..-1]
            elsif line =~ /^CLIENT_DEBUG: /
              result.debugIO << line['CLIENT_DEBUG: '.length..-1]
            elsif line =~ /^CLIENT_INTERNAL_ERROR: /
              result.errorIO << line['CLIENT_INTERNAL_ERROR: '.length..-1]
            else
              result.errorIO << line['CLIENT_ERROR: '.length..-1]
              result.hasUserActionableError = true
            end
          elsif line =~ /^CART_DATA: /
            result.data << line['CART_DATA: '.length..-1]
          elsif line =~ /^CART_PROPERTIES: /
            property = line['CART_PROPERTIES: '.length..-1].chomp.split('=')
            result.cart_properties[property[0]] = property[1]
          elsif line =~ /^APP_INFO: /
            result.appInfoIO << line['APP_INFO: '.length..-1]
          elsif result.exitcode == 0
            if line =~ /^SSH_KEY_(ADD|REMOVE): /
              if line =~ /^SSH_KEY_ADD: /
                key = line['SSH_KEY_ADD: '.length..-1].chomp
                result.cart_commands.push({:command => "SYSTEM_SSH_KEY_ADD", :args => [key]})
              else
                result.cart_commands.push({:command => "SYSTEM_SSH_KEY_REMOVE", :args => []})
              end
            elsif line =~ /^APP_SSH_KEY_(ADD|REMOVE): /
              if line =~ /^APP_SSH_KEY_ADD: /
                response = line['APP_SSH_KEY_ADD: '.length..-1].chomp
                cart, key = response.split(' ')
                cart = cart.gsub(".", "-")
                result.cart_commands.push({:command => "APP_SSH_KEY_ADD", :args => [cart, key]})
              else
                cart = line['APP_SSH_KEY_REMOVE: '.length..-1].chomp
                cart = cart.gsub(".", "-")
                result.cart_commands.push({:command => "APP_SSH_KEY_REMOVE", :args => [cart]})
              end
            elsif line =~ /^APP_ENV_VAR_REMOVE: /
              key = line['APP_ENV_VAR_REMOVE: '.length..-1].chomp
              result.cart_commands.push({:command => "APP_ENV_VAR_REMOVE", :args => [key]})
            elsif line =~ /^ENV_VAR_(ADD|REMOVE): /
              if line =~ /^ENV_VAR_ADD: /
                env_var = line['ENV_VAR_ADD: '.length..-1].chomp.split('=')
                result.cart_commands.push({:command => "ENV_VAR_ADD", :args => [env_var[0], env_var[1]]})
              else
                key = line['ENV_VAR_REMOVE: '.length..-1].chomp
                result.cart_commands.push({:command => "ENV_VAR_REMOVE", :args => [key]})
              end
            elsif line =~ /^BROKER_AUTH_KEY_(ADD|REMOVE): /
              if line =~ /^BROKER_AUTH_KEY_ADD: /
                result.cart_commands.push({:command => "BROKER_KEY_ADD", :args => []})
              else
                result.cart_commands.push({:command => "BROKER_KEY_REMOVE", :args => []})
              end
            elsif line =~ /^ATTR: /
              attr = line['ATTR: '.length..-1].chomp.split('=')
              result.cart_commands.push({:command => "ATTR", :args => [attr[0], attr[1]]})
            else
              #result.debugIO << line
            end
          else # exitcode != 0
            result.debugIO << line
            Rails.logger.debug "DEBUG: server results: " + line
          end
        end
      end
      result
    end

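    # ---- Editor's sketch (not part of the gem) ----------------------------
    # The line protocol sanitize_result parses: cartridge hooks tag each
    # stdout line with a prefix, and the broker routes the remainder to the
    # matching ResultIO bucket. A simplified, standalone router covering three
    # of the tags; the sample lines are made up.
    #
    #   require 'stringio'
    #
    #   buckets = { :resultIO => StringIO.new, :messageIO => StringIO.new, :errorIO => StringIO.new }
    #   routes  = { 'CLIENT_RESULT: ' => :resultIO, 'CLIENT_MESSAGE: ' => :messageIO, 'CLIENT_ERROR: ' => :errorIO }
    #
    #   output = "CLIENT_MESSAGE: Starting application...\nCLIENT_RESULT: app started\nCLIENT_ERROR: quota exceeded\n"
    #   output.each_line do |line|
    #     prefix, bucket = routes.find { |p, _| line.start_with?(p) }
    #     buckets[bucket] << line[prefix.length..-1] if bucket
    #   end
    #   buckets[:errorIO].string  # => "quota exceeded\n"
    # ------------------------------------------------------------------------
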
    def self.execute_parallel_jobs_impl(handle)
=begin
      handle.each { |id, job_list|
        options = MCollectiveApplicationContainerProxy.rpc_options
        rpc_client = rpcclient('openshift', :options => options)
        begin
          mc_args = { id => job_list }
          mcoll_reply = rpc_client.custom_request('execute_parallel', mc_args, id, {'identity' => id})
          rpc_client.disconnect
          if mcoll_reply and mcoll_reply.length > 0
            mcoll_reply = mcoll_reply[0]
            output = mcoll_reply.results[:data][:output]
            exitcode = mcoll_reply.results[:data][:exitcode]
            Rails.logger.debug("DEBUG: Output of parallel execute: #{output}, status: #{exitcode}")
            handle[id] = output if exitcode == 0
          end
        ensure
          rpc_client.disconnect
        end
      }
=end
      if handle && !handle.empty?
        begin
          options = MCollectiveApplicationContainerProxy.rpc_options
          rpc_client = rpcclient('openshift', :options => options)
          mc_args = handle.clone
          identities = handle.keys
          rpc_client.custom_request('execute_parallel', mc_args, identities, {'identity' => identities}).each { |mcoll_reply|
            if mcoll_reply.results[:statuscode] == 0
              output = mcoll_reply.results[:data][:output]
              exitcode = mcoll_reply.results[:data][:exitcode]
              sender = mcoll_reply.results[:sender]
              Rails.logger.debug("DEBUG: Output of parallel execute: #{output}, exitcode: #{exitcode}, from: #{sender}")
              output.each do |o|
                r = MCollectiveApplicationContainerProxy.sanitize_result(o[:result_stdout], exitcode) if o.kind_of?(Hash) and o.include?(:result_stdout)
                o[:result_stdout] = r.resultIO.string.chomp if r and (r.resultIO.string.chomp.length != 0)
              end if output.kind_of?(Array)
              handle[sender] = output if exitcode == 0
            end
          }
        ensure
          rpc_client.disconnect
        end
      end
    end
  end
end
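# ---- Editor's sketch (not part of the gem) --------------------------------
# The in/out contract of execute_parallel_jobs_impl, as far as the code above
# shows it: the caller hands in a hash of node identity => list of job hashes,
# and on success each value is replaced in place with the jobs as executed,
# their :result_stdout sanitized. :result_stdout follows the code above; the
# node name and the other job fields are illustrative assumptions.
#
#   handle = {
#     'node1.example.com' => [
#       { :cartridge => 'php-5.3', :action => 'status', :args => ['gear-uuid'] }
#     ]
#   }
#   # After the call, a successful node's entry looks roughly like:
#   # handle['node1.example.com']
#   #   => [{ :cartridge => 'php-5.3', :action => 'status',
#   #         :args => ['gear-uuid'], :result_stdout => 'app is running' }]
# ----------------------------------------------------------------------------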