openshift-origin-controller 1.3.2
Sign up to get free protection for your applications and to get access to all the features.
Potentially problematic release.
This version of openshift-origin-controller might be problematic. Click here for more details.
- data/COPYRIGHT +1 -0
- data/Gemfile +4 -0
- data/LICENSE +12 -0
- data/README.md +3 -0
- data/Rakefile +9 -0
- data/app/controllers/app_events_controller.rb +115 -0
- data/app/controllers/application_templates_controller.rb +19 -0
- data/app/controllers/applications_controller.rb +214 -0
- data/app/controllers/base_controller.rb +367 -0
- data/app/controllers/cartridges_controller.rb +48 -0
- data/app/controllers/descriptors_controller.rb +23 -0
- data/app/controllers/dns_resolvable_controller.rb +35 -0
- data/app/controllers/domains_controller.rb +156 -0
- data/app/controllers/emb_cart_controller.rb +276 -0
- data/app/controllers/emb_cart_events_controller.rb +52 -0
- data/app/controllers/environment_controller.rb +11 -0
- data/app/controllers/estimates_controller.rb +71 -0
- data/app/controllers/gear_groups_controller.rb +53 -0
- data/app/controllers/gears_controller.rb +70 -0
- data/app/controllers/keys_controller.rb +96 -0
- data/app/controllers/legacy_broker_controller.rb +510 -0
- data/app/controllers/quickstarts_controller.rb +29 -0
- data/app/controllers/user_controller.rb +38 -0
- data/app/helpers/cartridge_helper.rb +25 -0
- data/app/helpers/legacy_broker_helper.rb +21 -0
- data/app/helpers/user_action_logger.rb +38 -0
- data/app/models/application.rb +1718 -0
- data/app/models/application_template.rb +27 -0
- data/app/models/cartridge_cache.rb +51 -0
- data/app/models/cloud_user.rb +334 -0
- data/app/models/component_instance.rb +228 -0
- data/app/models/connection_endpoint.rb +10 -0
- data/app/models/district.rb +210 -0
- data/app/models/domain.rb +234 -0
- data/app/models/gear.rb +376 -0
- data/app/models/group_instance.rb +306 -0
- data/app/models/key.rb +20 -0
- data/app/models/legacy_reply.rb +15 -0
- data/app/models/legacy_request.rb +126 -0
- data/app/models/link.rb +11 -0
- data/app/models/message.rb +10 -0
- data/app/models/name_server_cache.rb +46 -0
- data/app/models/optional_param.rb +12 -0
- data/app/models/param.rb +13 -0
- data/app/models/remote_job.rb +57 -0
- data/app/models/rest_application.rb +126 -0
- data/app/models/rest_application10.rb +106 -0
- data/app/models/rest_application12.rb +124 -0
- data/app/models/rest_application_estimate.rb +12 -0
- data/app/models/rest_application_template.rb +20 -0
- data/app/models/rest_cartridge10.rb +41 -0
- data/app/models/rest_cartridge11.rb +151 -0
- data/app/models/rest_domain.rb +43 -0
- data/app/models/rest_domain10.rb +42 -0
- data/app/models/rest_estimates.rb +16 -0
- data/app/models/rest_gear.rb +14 -0
- data/app/models/rest_gear_group.rb +26 -0
- data/app/models/rest_key.rb +24 -0
- data/app/models/rest_reply.rb +31 -0
- data/app/models/rest_user.rb +43 -0
- data/app/models/result_io.rb +67 -0
- data/app/models/usage_record.rb +37 -0
- data/app/models/validators/app_validator.rb +30 -0
- data/app/models/validators/key_validator.rb +30 -0
- data/app/models/validators/namespace_validator.rb +18 -0
- data/config/routes.rb +36 -0
- data/lib/controller_engine.rb +7 -0
- data/lib/openshift-origin-controller.rb +14 -0
- data/lib/openshift/application_container_proxy.rb +241 -0
- data/lib/openshift/auth_service.rb +101 -0
- data/lib/openshift/data_store.rb +33 -0
- data/lib/openshift/dns_service.rb +41 -0
- data/lib/openshift/mongo_data_store.rb +671 -0
- data/openshift-origin-controller.gemspec +42 -0
- data/rubygem-openshift-origin-controller.spec +274 -0
- data/test/cucumber/application-estimate.feature +25 -0
- data/test/cucumber/cartridge-10gen-mms-agent.feature +28 -0
- data/test/cucumber/cartridge-cron.feature +32 -0
- data/test/cucumber/cartridge-haproxy.feature +31 -0
- data/test/cucumber/cartridge-jenkins-build.feature +12 -0
- data/test/cucumber/cartridge-jenkins-client.feature +10 -0
- data/test/cucumber/cartridge-lifecycle-diy.feature +21 -0
- data/test/cucumber/cartridge-lifecycle-jbossas.feature +61 -0
- data/test/cucumber/cartridge-lifecycle-jbosseap.feature +61 -0
- data/test/cucumber/cartridge-lifecycle-jbossews10.feature +61 -0
- data/test/cucumber/cartridge-lifecycle-jenkins.feature +41 -0
- data/test/cucumber/cartridge-lifecycle-nodejs.feature +59 -0
- data/test/cucumber/cartridge-lifecycle-perl.feature +40 -0
- data/test/cucumber/cartridge-lifecycle-php.feature +106 -0
- data/test/cucumber/cartridge-lifecycle-python.feature +40 -0
- data/test/cucumber/cartridge-lifecycle-ruby18.feature +49 -0
- data/test/cucumber/cartridge-lifecycle-ruby19.feature +41 -0
- data/test/cucumber/cartridge-mongodb.feature +31 -0
- data/test/cucumber/cartridge-mysql.feature +30 -0
- data/test/cucumber/cartridge-php.feature +14 -0
- data/test/cucumber/cartridge-phpmyadmin.feature +32 -0
- data/test/cucumber/cartridge-postgresql.feature +32 -0
- data/test/cucumber/cartridge-runtime-extended-db.feature +64 -0
- data/test/cucumber/cartridge-runtime-extended-jboss.feature +24 -0
- data/test/cucumber/cartridge-runtime-extended-nodejs.feature +21 -0
- data/test/cucumber/cartridge-runtime-extended-perl.feature +18 -0
- data/test/cucumber/cartridge-runtime-extended-php.feature +19 -0
- data/test/cucumber/cartridge-runtime-extended-python.feature +18 -0
- data/test/cucumber/cartridge-runtime-extended-ruby.feature +22 -0
- data/test/cucumber/cartridge-runtime-standard-diy.feature +6 -0
- data/test/cucumber/cartridge-runtime-standard-jbossas.feature +7 -0
- data/test/cucumber/cartridge-runtime-standard-jbosseap.feature +7 -0
- data/test/cucumber/cartridge-runtime-standard-jbossews10.feature +7 -0
- data/test/cucumber/cartridge-runtime-standard-jenkins.feature +8 -0
- data/test/cucumber/cartridge-runtime-standard-nodejs.feature +7 -0
- data/test/cucumber/cartridge-runtime-standard-perl.feature +6 -0
- data/test/cucumber/cartridge-runtime-standard-php.feature +6 -0
- data/test/cucumber/cartridge-runtime-standard-python.feature +6 -0
- data/test/cucumber/cartridge-runtime-standard-ruby.feature +19 -0
- data/test/cucumber/cartridge-switchyard.feature +36 -0
- data/test/cucumber/descriptor.feature +40 -0
- data/test/cucumber/embedded.feature +44 -0
- data/test/cucumber/idler.feature +75 -0
- data/test/cucumber/misc/descriptor/manifest.yml +22 -0
- data/test/cucumber/misc/php/db_test.php +21 -0
- data/test/cucumber/openshift-node.feature +21 -0
- data/test/cucumber/rest-application-templates.feature +31 -0
- data/test/cucumber/rest-applications.feature +431 -0
- data/test/cucumber/rest-cartridge-types.feature +16 -0
- data/test/cucumber/rest-domains.feature +276 -0
- data/test/cucumber/rest-gears.feature +38 -0
- data/test/cucumber/rest-keys.feature +247 -0
- data/test/cucumber/rest-quickstarts.feature +27 -0
- data/test/cucumber/rest-workflow.feature +64 -0
- data/test/cucumber/step_definitions/api_steps.rb +369 -0
- data/test/cucumber/step_definitions/application-estimate-steps.rb +51 -0
- data/test/cucumber/step_definitions/application_steps.rb +215 -0
- data/test/cucumber/step_definitions/cartridge-10gen-mms-agent_steps.rb +11 -0
- data/test/cucumber/step_definitions/cartridge-cron_steps.rb +51 -0
- data/test/cucumber/step_definitions/cartridge-haproxy_steps.rb +30 -0
- data/test/cucumber/step_definitions/cartridge-jenkins_steps.rb +93 -0
- data/test/cucumber/step_definitions/cartridge-lifecycle-nodejs_steps.rb +30 -0
- data/test/cucumber/step_definitions/cartridge-mongodb_steps.rb +60 -0
- data/test/cucumber/step_definitions/cartridge-mysql_steps.rb +56 -0
- data/test/cucumber/step_definitions/cartridge-php_steps.rb +72 -0
- data/test/cucumber/step_definitions/cartridge-postgresql_steps.rb +59 -0
- data/test/cucumber/step_definitions/cartridge-switchyard_steps.rb +29 -0
- data/test/cucumber/step_definitions/client_steps.rb +12 -0
- data/test/cucumber/step_definitions/descriptor_step.rb +32 -0
- data/test/cucumber/step_definitions/idler_steps.rb +37 -0
- data/test/cucumber/step_definitions/node_steps.rb +203 -0
- data/test/cucumber/step_definitions/runtime_steps.rb +547 -0
- data/test/cucumber/step_definitions/runtime_url_steps.rb +46 -0
- data/test/cucumber/step_definitions/trap-user-extended_steps.rb +14 -0
- data/test/cucumber/step_definitions/trap-user_steps.rb +58 -0
- data/test/cucumber/support/00_setup_helper.rb +106 -0
- data/test/cucumber/support/app_helper.rb +243 -0
- data/test/cucumber/support/assertions.rb +52 -0
- data/test/cucumber/support/command_helper.rb +453 -0
- data/test/cucumber/support/dns_helper.rb +54 -0
- data/test/cucumber/support/env.rb +5 -0
- data/test/cucumber/support/process_helper.rb +44 -0
- data/test/cucumber/support/runtime_support.rb +440 -0
- data/test/cucumber/support/unused.rb +27 -0
- data/test/cucumber/support/user_helper.rb +37 -0
- data/test/cucumber/trap-user-extended.feature +53 -0
- data/test/cucumber/trap-user.feature +34 -0
- data/test/ddns/1.168.192-rev.db.init +13 -0
- data/test/ddns/HOWTO.txt +207 -0
- data/test/ddns/Kexample.com.+157+06142.key +1 -0
- data/test/ddns/Kexample.com.+157+06142.private +7 -0
- data/test/ddns/authconfig.rb +14 -0
- data/test/ddns/example.com.db.init +23 -0
- data/test/ddns/example.com.key +4 -0
- data/test/ddns/named.ca +52 -0
- data/test/ddns/named.conf +48 -0
- data/test/ddns/named.empty +10 -0
- data/test/ddns/named.localhost +10 -0
- data/test/ddns/named.loopback +11 -0
- data/test/ddns/named.rfc1912.zones +42 -0
- data/test/ddns/named.root.key +5 -0
- data/test/ddns/named_service.rb +127 -0
- data/test/unit/bind_dns_service_test.rb +167 -0
- data/test/unit/broker_auth_test.rb +28 -0
- metadata +545 -0
@@ -0,0 +1,29 @@
|
|
1
|
+
# REST controller serving the quickstart catalog read from a JSON file
# under OpenShift::Config::CONF_DIR. Responds with JSON only.
class QuickstartsController < BaseController
  respond_to :json
  before_filter :check_version

  # GET /quickstarts
  # Renders every quickstart entry found in the config file (empty list
  # when the file is missing or unparsable).
  def index
    render_success(:ok, "quickstarts", quickstarts, "LIST_QUICKSTARTS", "Showing all quickstarts")
  end

  # GET /quickstarts/:id
  # Renders the single quickstart whose nested 'quickstart'->'id' matches
  # params[:id], or a 404-style error payload (error code 118).
  def show
    id = params[:id]
    if quickstart = quickstarts.find{ |obj| obj['quickstart']['id'] == id }
      render_success(:ok, "quickstarts", [quickstart], "SHOW_QUICKSTART", "Showing quickstart for '#{id}'")
    else
      render_error(:not_found, "Quickstart '#{id}' not found", 118, "SHOW_QUICKSTART")
    end
  end

  protected
  # Parses the quickstart JSON file; any read/parse failure is deliberately
  # treated as "no quickstarts" (best-effort catalog, never a 500).
  def quickstarts
    # File.exist? replaces the deprecated File.exists? alias (removed in Ruby 3.2);
    # behavior is identical.
    if File.exist?(file)
      ActiveSupport::JSON.decode(IO.read(file)) rescue []
    else
      []
    end
  end

  # Absolute path of the quickstart catalog file.
  def file
    File.join(OpenShift::Config::CONF_DIR, 'quickstarts.json')
  end
end
|
@@ -0,0 +1,38 @@
|
|
1
|
+
# REST controller for the authenticated user resource (/user).
# Supports JSON and XML; deletion is restricted to subaccount users.
class UserController < BaseController
  respond_to :json, :xml
  before_filter :authenticate, :check_version

  # GET /user
  # Renders the current user, or a 404 payload (error code 99) when the
  # authenticated login has no backing CloudUser record.
  def show
    if @cloud_user.nil?
      log_action(@request_id, 'nil', @login, "SHOW_USER", true, "User '#{@login}' not found")
      return render_error(:not_found, "User '#{@login}' not found", 99)
    end

    render_success(:ok, "user", RestUser.new(@cloud_user, get_url, nolinks), "SHOW_USER")
  end

  # DELETE /user
  # NOTE: Only applicable for subaccount users
  # Without 'force', deletion is refused while domains or applications
  # still exist (error code 139); with 'force' everything is removed.
  def destroy
    force = get_bool(params[:force])

    if @cloud_user.nil?
      log_action(@request_id, 'nil', @login, "DELETE_USER", true, "User '#{@login}' not found")
      return render_format_error(:not_found, "User '#{@login}' not found", 99)
    end

    # Top-level accounts may never be deleted through this endpoint.
    unless @cloud_user.parent_user_login
      return render_format_error(:forbidden, "User deletion not permitted. Only applicable for subaccount users.", 138, "DELETE_USER")
    end

    begin
      unless force
        has_resources = !@cloud_user.domains.empty? or !@cloud_user.applications.empty?
        if has_resources
          return render_format_error(:unprocessable_entity, "User '#{@login}' has valid domain or applications. Either delete domain, applications and retry the operation or use 'force' option.",
                                     139, "DELETE_USER")
        end
        @cloud_user.delete
      else
        @cloud_user.force_delete
      end
      render_format_success(:no_content, nil, nil, "DELETE_USER", "User #{@login} deleted.", true)
    rescue Exception => e
      # NOTE(review): rescuing Exception (not StandardError) is preserved from
      # the original; it also traps interrupts/exits — confirm intentional.
      return render_format_exception(e, "DELETE_USER")
    end
  end
end
|
@@ -0,0 +1,25 @@
|
|
1
|
+
# View/controller helper for building REST cartridge representations and
# validating cartridge names against the cache.
module CartridgeHelper
  # Builds the list of REST cartridge objects for an application: the
  # standalone (framework) cartridge (API >= 1.1 only) plus one entry per
  # embedded cartridge, using the representation matching the requested
  # API version ($requested_api_version global).
  def get_cartridges(application)
    result = []
    unless $requested_api_version == 1.0
      result << RestCartridge11.new("standalone", application.framework, application, get_url, nil, nolinks)
    end

    if application.embedded
      application.embedded.each_key do |key|
        klass = ($requested_api_version == 1.0) ? RestCartridge10 : RestCartridge11
        result << klass.new("embedded", key, application, get_url, nil, nolinks)
      end
    end
    result
  end

  # Returns true when +framework+ names an available cartridge of the given
  # type. +container+ is accepted but unused (kept for caller compatibility).
  def check_cartridge_type(framework, container, cart_type)
    carts = CartridgeCache.cartridge_names(cart_type)
    Rails.logger.debug "Available cartridges #{carts.join(', ')}"
    carts.include?(framework)
  end
end
|
@@ -0,0 +1,21 @@
|
|
1
|
+
# Helper for the legacy broker: read-through caching on top of Rails.cache.
module LegacyBrokerHelper
  # Fetches +key+ from the Rails cache, computing and storing it via the
  # given block on a miss. When controller caching is disabled the block
  # result is returned directly without touching the cache.
  # Note: falsy cached values are treated as misses (original semantics).
  def get_cached(key, opts={})
    caching_on = Rails.configuration.action_controller.perform_caching
    return yield if !caching_on && block_given?

    val = Rails.cache.read(key)
    if !val && block_given?
      val = yield
      # Only persist truthy results; nil/false are recomputed next time.
      Rails.cache.write(key, val, opts) if val
    end

    return val
  end
end
|
@@ -0,0 +1,38 @@
|
|
1
|
+
# Mixin providing structured audit logging of user-initiated actions to a
# dedicated log file (configured via Rails user_action_logging) and to the
# standard Rails logger.
module UserActionLogger

  # Shared, lazily-created Logger for all includers. Kept as a class
  # variable to preserve the original cross-includer sharing semantics.
  @@action_logger = nil

  # Returns the memoized action Logger, creating it on first use when
  # user-action logging is enabled and a file path is configured; returns
  # nil otherwise.
  def get_action_logger()
    if @@action_logger.nil?
      path = nil
      if Rails.configuration.user_action_logging[:logging_enabled]
        path = Rails.configuration.user_action_logging[:log_filepath]
      end
      @@action_logger = Logger.new(path) unless path.nil?
    end
    @@action_logger
  end

  # Writes one audit line for an action. +success+ selects SUCCESS/FAILURE
  # and the Rails log level (DEBUG vs ERROR); +args+ are appended as
  # key=value pairs.
  def log_action(request_id, user_id, login, action, success = true, description = "", args = {})
    log_level = success ? Logger::DEBUG : Logger::ERROR
    action_logger = get_action_logger()

    unless action_logger.nil?
      result = success ? "SUCCESS" : "FAILURE"
      description = description.nil? ? "" : description.strip
      now = Time.new
      date = now.strftime("%Y-%m-%d")
      time = now.strftime("%H:%M:%S")

      parts = ["#{result} DATE=#{date} TIME=#{time} ACTION=#{action} REQ_ID=#{request_id} USER_ID=#{user_id} LOGIN=#{login}"]
      args.each { |k, v| parts << "#{k}=#{v}" }
      message = parts.join(" ")

      action_logger.info("#{message} #{description}")
    end

    # Using a block prevents the message in the block from being executed
    # if the log_level is lower than the one set for the logger
    Rails.logger.add(log_level) {"[REQ_ID=#{request_id}] ACTION=#{action} #{description}"}
  end

end
|
@@ -0,0 +1,1718 @@
|
|
1
|
+
require 'state_machine'
|
2
|
+
require 'syslog'
|
3
|
+
require 'shellwords'
|
4
|
+
|
5
|
+
# Domain model for an OpenShift application: wraps a cartridge descriptor
# plus ownership, scaling, gear-group and connection state. Persisted per
# user via the OpenShift::Cartridge/Model layer (keyed by name).
class Application < OpenShift::Cartridge
  # Runtime/persisted attributes. group_instance_map / comp_instance_map /
  # working_* are derived elaboration state and are excluded from persistence.
  attr_accessor :user, :creation_time, :uuid, :aliases, :cart_data,
    :state, :group_instance_map, :comp_instance_map, :conn_endpoints_list,
    :domain, :group_override_map, :working_comp_inst_hash,
    :working_group_inst_hash, :configure_order, :start_order,
    :scalable, :proxy_cartridge, :init_git_url, :node_profile,
    :ssh_keys, :ngears, :usage_records, :destroyed_gears, :user_agent
  primary_key :name
  exclude_attributes :user, :comp_instance_map, :group_instance_map,
    :working_comp_inst_hash, :working_group_inst_hash, :user_agent,
    :group_override_map
  include_attributes :comp_instances, :group_instances

  APP_NAME_MAX_LENGTH = 32
  DEFAULT_NODE_PROFILE = "small"
  # Frameworks that may never be created as scalable apps.
  UNSCALABLE_FRAMEWORKS = ["jenkins-1.4", "diy-0.1"]
  # Embedded cartridges permitted inside a scalable app.
  SCALABLE_EMBEDDED_CARTS = ["mysql-5.1", "mongodb-2.2", "postgresql-8.4", "jenkins-client-1.4"]

  # Hook allowing observers to veto/extend validation.
  validate :extended_validator

  # Name must be alphanumeric, within length limits, and not blacklisted
  # by the container proxy (all failures use exit code 105).
  validates_each :name, :allow_nil =>false do |record, attribute, val|
    if !(val =~ /\A[A-Za-z0-9]+\z/)
      record.errors.add attribute, {:message => "Invalid #{attribute} specified", :exit_code => 105}
    end
    if val and val.length > APP_NAME_MAX_LENGTH
      record.errors.add attribute, {:message => "The supplied application name is too long. (Max permitted length: #{APP_NAME_MAX_LENGTH} characters)", :exit_code => 105}
    end
    Rails.logger.debug "Checking to see if application name is black listed"
    if OpenShift::ApplicationContainerProxy.blacklisted?(val)
      record.errors.add attribute, {:message => "The supplied application name is not allowed", :exit_code => 105}
    end
  end

  # Gear size must be one of the sizes this user is entitled to.
  validates_each :node_profile, :allow_nil =>true do |record, attribute, val|
    allowed_sizes=OpenShift::ApplicationContainerProxy.valid_gear_sizes(record.user)
    unless allowed_sizes.include? val
      record.errors.add attribute, {:message => "Invalid Size: #{val}. Must be: #{allowed_sizes.join(', ')}. Please contact support for access to additional sizes.", :exit_code => 134}
    end
  end
|
44
|
+
|
45
|
+
  # Custom-validation hook: broadcasts :validate_application so registered
  # observers (plugins) can add their own errors to this record.
  def extended_validator
    notify_observers(:validate_application)
  end
|
48
|
+
|
49
|
+
  # @param [CloudUser] user
  # @param [String] app_name Application name
  # @param [optional, String] uuid Unique identifier for the application
  # @param [deprecated, String] node_profile Node profile for the first application gear
  # @param [deprecated, String] framework Cartridge name to use as the framework of the application
  def initialize(user=nil, app_name=nil, uuid=nil, node_profile=nil, framework=nil, template=nil, will_scale=false, domain=nil, init_git_url=nil)
    self.user = user
    self.domain = domain
    self.node_profile = node_profile
    self.creation_time = DateTime::now().strftime
    self.uuid = uuid || OpenShift::Model.gen_uuid
    self.scalable = will_scale
    self.ngears = 0

    if template.nil?
      if self.scalable
        # Scalable apps are built from a generated proxy+web descriptor.
        descriptor_hash = YAML.load(template_scalable_app(app_name, framework))
        from_descriptor(descriptor_hash)
        self.proxy_cartridge = "haproxy-1.4"
      else
        # Plain apps: minimal descriptor; framework becomes a required feature.
        from_descriptor({"Name"=>app_name})
        self.requires_feature = []
        self.requires_feature << framework unless framework.nil?
      end
      @init_git_url = init_git_url unless init_git_url.nil?
    else
      # Template-based creation: descriptor comes from the template,
      # defaulting Configure-Order to the template's Requires list.
      template_descriptor = YAML.load(template.descriptor_yaml)
      template_descriptor["Name"] = app_name
      if not template_descriptor["Configure-Order"]
        requires_list = template_descriptor["Requires"] || []
        template_descriptor["Configure-Order"] = requires_list
      end
      from_descriptor(template_descriptor)
      @init_git_url = template.git_url
    end
    # An application is not itself a cartridge category.
    self.categories -= ["cartridge"]
  end
|
86
|
+
|
87
|
+
def node_profile
|
88
|
+
# node_profile can be nil for older data. Should migrate everything to have a node_profile
|
89
|
+
# with the next major migration. Although technically node_profile shouldn't even be on application.
|
90
|
+
if @node_profile.nil?
|
91
|
+
return DEFAULT_NODE_PROFILE
|
92
|
+
else
|
93
|
+
return @node_profile
|
94
|
+
end
|
95
|
+
end
|
96
|
+
|
97
|
+
def add_to_requires_feature(feature)
|
98
|
+
prof = @profile_name_map[@default_profile]
|
99
|
+
if self.scalable
|
100
|
+
# add to the proxy component
|
101
|
+
comp_name = "proxy" if comp_name.nil?
|
102
|
+
prof = @profile_name_map[@default_profile]
|
103
|
+
cinst = ComponentInstance::find_component_in_cart(prof, self, comp_name, self.get_name_prefix)
|
104
|
+
raise OpenShift::NodeException.new("Cannot find component '#{comp_name}' in app #{self.name}.", 135, result_io) if cinst.nil?
|
105
|
+
comp,profile,cart = cinst.get_component_definition(self)
|
106
|
+
raise OpenShift::UserException.new("#{feature} already embedded in '#{@name}'", 136) if comp.depends.include? feature
|
107
|
+
fcart = self.framework
|
108
|
+
conn = OpenShift::Connection.new("#{feature}-web-#{fcart}")
|
109
|
+
conn.components = ["proxy/#{feature}", "web/#{fcart}"]
|
110
|
+
prof.add_connection(conn)
|
111
|
+
conn = OpenShift::Connection.new("#{feature}-proxy-#{fcart}")
|
112
|
+
conn.components = ["proxy/#{feature}", "proxy/#{fcart}"]
|
113
|
+
prof.add_connection(conn)
|
114
|
+
|
115
|
+
# FIXME: Booya - hacks galore -- fix this to be more generic when
|
116
|
+
# scalable apps allow more components in SCALABLE_EMBEDDED_CARTS
|
117
|
+
if feature == "jenkins-client-1.4"
|
118
|
+
conn = OpenShift::Connection.new("#{feature}-proxy-haproxy-1.4")
|
119
|
+
conn.components = ["proxy/#{feature}", "proxy/haproxy-1.4"]
|
120
|
+
prof.add_connection(conn)
|
121
|
+
end
|
122
|
+
|
123
|
+
comp.depends << feature
|
124
|
+
else
|
125
|
+
self.requires_feature.each { |cart|
|
126
|
+
conn = OpenShift::Connection.new("#{feature}-#{cart}")
|
127
|
+
conn.components = [cart, feature]
|
128
|
+
prof.add_connection(conn)
|
129
|
+
}
|
130
|
+
self.requires_feature << feature
|
131
|
+
end
|
132
|
+
end
|
133
|
+
|
134
|
+
  # Returns the YAML descriptor (as a string) used to bootstrap a scalable
  # application: a "proxy" component (framework + haproxy-1.4) and a "web"
  # component (framework only), each in its own gear group, with auto-scale
  # and proxy-web connections.
  # NOTE(review): original indentation of this YAML string was lost in
  # extraction; reconstructed per standard YAML nesting — verify against VCS.
  def template_scalable_app(app_name, framework)
    return "
Name: #{app_name}
Components:
  proxy:
    Dependencies: [#{framework}, \"haproxy-1.4\"]
  web:
    Dependencies: [#{framework}]
Groups:
  proxy:
    Components:
      proxy: proxy
  web:
    Components:
      web: web
GroupOverrides:
  - [\"proxy\", \"proxy/haproxy-1.4\"]
  - [\"proxy\", \"proxy/#{framework}\"]
  - [\"web\", \"web/#{framework}\"]
Connections:
  auto-scale:
    Components: [\"proxy/haproxy-1.4\", \"web/#{framework}\"]
  proxy-web:
    Components: [\"proxy/#{framework}\", \"web/#{framework}\"]
Configure-Order: [\"proxy/#{framework}\", \"proxy/haproxy-1.4\"]
"
  end
|
161
|
+
|
162
|
+
def remove_from_requires_feature(feature)
|
163
|
+
prof = @profile_name_map[@default_profile]
|
164
|
+
if prof.connection_name_map
|
165
|
+
prof.connection_name_map.delete_if {|k,v| v.components[0].include? feature or v.components[1].include? feature }
|
166
|
+
end
|
167
|
+
if self.scalable
|
168
|
+
comp_name = "proxy" if comp_name.nil?
|
169
|
+
prof = @profile_name_map[@default_profile]
|
170
|
+
cinst = ComponentInstance::find_component_in_cart(prof, self, comp_name, self.get_name_prefix)
|
171
|
+
raise OpenShift::NodeException.new("Cannot find component '#{comp_name}' in app #{self.name}.", 135, result_io) if cinst.nil?
|
172
|
+
comp,profile,cart = cinst.get_component_definition(self)
|
173
|
+
raise OpenShift::UserException.new("#{feature} not embedded in '#{@name}', try adding it first", 135) if not comp.depends.include? feature
|
174
|
+
comp.depends.delete(feature)
|
175
|
+
else
|
176
|
+
self.requires_feature.delete feature
|
177
|
+
end
|
178
|
+
end
|
179
|
+
|
180
|
+
# Find an application to which user has access
|
181
|
+
# @param [CloudUser] user
|
182
|
+
# @param [String] app_name
|
183
|
+
# @return [Application]
|
184
|
+
def self.find(user, app_name)
|
185
|
+
return nil if app_name.nil? or app_name.empty?
|
186
|
+
app = nil
|
187
|
+
if user.applications
|
188
|
+
user.applications.each do |next_app|
|
189
|
+
if next_app.name.downcase == app_name.downcase
|
190
|
+
app = next_app
|
191
|
+
break
|
192
|
+
end
|
193
|
+
end
|
194
|
+
else
|
195
|
+
app = super(user.login, app_name)
|
196
|
+
return nil unless app
|
197
|
+
app.user = user
|
198
|
+
app.reset_state
|
199
|
+
end
|
200
|
+
app
|
201
|
+
end
|
202
|
+
|
203
|
+
# Find an applications to which user has access
|
204
|
+
# @param [CloudUser] user
|
205
|
+
# @return [Array<Application>]
|
206
|
+
def self.find_all(user)
|
207
|
+
apps = nil
|
208
|
+
if user.applications
|
209
|
+
apps = user.applications
|
210
|
+
else
|
211
|
+
apps = super(user.login)
|
212
|
+
apps.each do |app|
|
213
|
+
app.user = user
|
214
|
+
app.reset_state
|
215
|
+
end
|
216
|
+
user.applications = apps
|
217
|
+
end
|
218
|
+
apps
|
219
|
+
end
|
220
|
+
|
221
|
+
def self.find_by_gear_uuid(gear_uuid)
|
222
|
+
hash = OpenShift::DataStore.instance.find_by_gear_uuid(gear_uuid)
|
223
|
+
return nil unless hash
|
224
|
+
user = CloudUser.hash_to_obj hash
|
225
|
+
user.applications.each do |next_app|
|
226
|
+
next_app.gears.each do |gear|
|
227
|
+
if gear.uuid == gear_uuid
|
228
|
+
return next_app,gear
|
229
|
+
end
|
230
|
+
end
|
231
|
+
end
|
232
|
+
return nil
|
233
|
+
end
|
234
|
+
|
235
|
+
def self.find_by_uuid(uuid)
|
236
|
+
hash = OpenShift::DataStore.instance.find_by_uuid(self.name,uuid)
|
237
|
+
return nil unless hash
|
238
|
+
user = CloudUser.hash_to_obj hash
|
239
|
+
app = nil
|
240
|
+
user.applications.each do |next_app|
|
241
|
+
if next_app.uuid == uuid
|
242
|
+
app = next_app
|
243
|
+
break
|
244
|
+
end
|
245
|
+
end
|
246
|
+
return app
|
247
|
+
end
|
248
|
+
|
249
|
+
def self.hash_to_obj(hash)
|
250
|
+
domain = nil
|
251
|
+
if hash["domain"]
|
252
|
+
domain = Domain.hash_to_obj(hash["domain"])
|
253
|
+
end
|
254
|
+
app = super(hash)
|
255
|
+
app.domain = domain
|
256
|
+
app
|
257
|
+
end
|
258
|
+
|
259
|
+
# @overload Application.get_available_cartridges(cart_type)
|
260
|
+
# @deprecated
|
261
|
+
# Returns List of names of available cartridges of specified type
|
262
|
+
# @param [String] cart_type Must be "standalone" or "embedded" or nil
|
263
|
+
# @return [Array<String>]
|
264
|
+
# @overload Application.get_available_cartridges
|
265
|
+
# @return [Array<String>]
|
266
|
+
# Returns List of names of all available cartridges
|
267
|
+
def self.get_available_cartridges(cart_type=nil)
|
268
|
+
cart_names = CartridgeCache.cartridge_names(cart_type)
|
269
|
+
end
|
270
|
+
|
271
|
+
  # Saves the application object in the datastore
  def save
    super(user.login)
    # Reset transient accounting after persisting: presumably the gear
    # delta and usage records are flushed by the save itself — NOTE(review):
    # confirm against the Model#save implementation.
    self.ngears = 0
    self.usage_records = nil
    self.destroyed_gears = []
  end
|
278
|
+
|
279
|
+
  # Deletes the application object from the datastore
  # (persistence only — gears must be destroyed separately via #destroy).
  def delete
    super(user.login)
  end
|
283
|
+
|
284
|
+
  # Processes the application descriptor and creates all the gears necessary to host the application.
  # Destroys application on all gears if any gear fails
  # @return [ResultIO]
  def create
    result_io = ResultIO.new
    gears_created = []  # NOTE(review): never populated/used — candidate for removal
    begin
      self.node_profile = DEFAULT_NODE_PROFILE unless self.node_profile
      elaborate_descriptor
      self.class.notify_observers(:before_application_create, {:application => self, :reply => result_io})
      if self.scalable
        raise OpenShift::UserException.new("Scalable app cannot be of type #{UNSCALABLE_FRAMEWORKS.join(' ')}", "108", result_io) if UNSCALABLE_FRAMEWORKS.include? framework
        # Reject up front if the user lacks quota for the minimum gear set.
        min_gear_count = 0
        group_instances.uniq.each { |gi|
          min_gear_count += gi.min
        }
        if ((user.consumed_gears+min_gear_count) > user.max_gears)
          raise OpenShift::UserException.new("Creating this application requires #{min_gear_count} gears, and you are using #{user.consumed_gears} of your #{user.max_gears} available gears.", 104)
        end
      end
      user.applications = [] unless user.applications
      user.applications << self
      Rails.logger.debug "Creating gears"
      group_instances.uniq.each do |ginst|
        create_result, new_gear = ginst.add_gear(self)
        result_io.append create_result
      end

      self.gear.name = self.name unless scalable
      self.class.notify_observers(:application_creation_success, {:application => self, :reply => result_io})
    rescue Exception => e
      Rails.logger.debug e.message
      Rails.logger.debug e.backtrace.join("\n")
      # Roll back every gear created so far, then re-raise to the caller.
      Rails.logger.debug "Rolling back application gear creation"
      result_io.append self.destroy(true)
      self.class.notify_observers(:application_creation_failure, {:application => self, :reply => result_io})
      raise
    ensure
      # Persist on both success and failure paths.
      save
    end
    self.class.notify_observers(:after_application_create, {:application => self, :reply => result_io})
    result_io
  end
|
327
|
+
|
328
|
+
# Convenience method to cleanup an application
|
329
|
+
def cleanup_and_delete
|
330
|
+
reply = ResultIO.new
|
331
|
+
reply.append self.destroy_dns
|
332
|
+
reply.append self.destroy
|
333
|
+
self.delete
|
334
|
+
reply
|
335
|
+
end
|
336
|
+
|
337
|
+
  # Destroys all gears.
  # Removes gears group-by-group in the reverse of configure order; failures
  # are collected (not fatal per-gear) and raised once at the end.
  # @param [Boolean] force passed through to GroupInstance#remove_gear
  # @return [ResultIO]
  # @raise [OpenShift::NodeException] if any gear could not be cleaned up
  def destroy(force=false)
    reply = ResultIO.new
    self.class.notify_observers(:before_application_destroy, {:application => self, :reply => reply})

    # Only need to destroy if application has been elaborated first
    unless self.configure_order.nil?
      # Destroy in the reverse order of configure.
      # The delete-then-append keeps each group instance unique while
      # moving it to its latest (reversed) position.
      group_instances = []
      self.configure_order.reverse.each do |comp_inst_name|
        comp_inst = self.comp_instance_map[comp_inst_name]
        next if comp_inst.parent_cart_name == self.name
        group_inst = self.group_instance_map[comp_inst.group_instance_name]
        group_instances.delete(group_inst)
        group_instances << group_inst
      end

      failures = []
      group_instances.each do |group_inst|
        s,f = run_on_gears(group_inst.gears, reply, false) do |gear, r|
          r.append group_inst.remove_gear(gear, force)
        end
        failures += f
      end

      begin
        self.save if self.persisted?
      rescue Exception => e
        # pass on failure... because we maybe wanting a delete here instead anyway
      end

      failures.each do |data|
        Rails.logger.debug("Unable to clean up application on gear #{data[:gear]} due to exception #{data[:exception].message}")
        Rails.logger.debug(data[:exception].backtrace.inspect)
      end

      raise OpenShift::NodeException.new("Could not destroy all gears of application.", 1, reply) if failures.length > 0
    end
    self.class.notify_observers(:after_application_destroy, {:application => self, :reply => reply})
    reply
  end
|
378
|
+
|
379
|
+
def web_cart
|
380
|
+
return framework
|
381
|
+
end
|
382
|
+
|
383
|
+
def gears
|
384
|
+
self.group_instances.uniq.map{ |ginst| ginst.gears }.flatten
|
385
|
+
end
|
386
|
+
|
387
|
+
  # Adds one gear to the group instance backing +comp_name+ (default "web"),
  # then reconfigures dependencies and re-runs connections.
  # @param [String, nil] comp_name component to scale (defaults to "web")
  # @return [ResultIO]
  # @raise [OpenShift::UserException] non-scalable app (255) or gear limits (104)
  # @raise [OpenShift::NodeException] component/group lookup failure
  def scaleup(comp_name=nil)
    result_io = ResultIO.new

    if not self.scalable
      raise OpenShift::UserException.new("Cannot scale a non-scalable application", 255, result_io)
    end

    comp_name = "web" if comp_name.nil?
    prof = @profile_name_map[@default_profile]
    cinst = ComponentInstance::find_component_in_cart(prof, self, comp_name, self.get_name_prefix)
    raise OpenShift::NodeException.new("Cannot find #{comp_name} in app #{self.name}.", 1, result_io) if cinst.nil?
    ginst = self.group_instance_map[cinst.group_instance_name]
    raise OpenShift::NodeException.new("Cannot find group #{cinst.group_instance_name} for #{comp_name} in app #{self.name}.", 1, result_io) if ginst.nil?
    # Per-group cap (max <= 0 means unlimited) and per-user gear quota.
    raise OpenShift::UserException.new("Cannot scale up beyond maximum gear limit in app #{self.name}.", 104, result_io) if ginst.gears.length >= ginst.max and ginst.max > 0
    raise OpenShift::UserException.new("Cannot scale up beyond gear limit '#{user.max_gears}'", 104, result_io) if user.consumed_gears >= user.max_gears
    result, new_gear = ginst.add_gear(self)
    result_io.append result
    result_io.append self.configure_dependencies
    self.execute_connections
    result_io
  end
|
408
|
+
|
409
|
+
# Removes the most recently added gear from the group instance hosting
# the given component of a scalable application. DNS for the gear is
# deregistered before the gear itself is removed from the node.
#
# @param [String] comp_name Component to scale down (defaults to "web").
# @return [ResultIO]
# @raise [OpenShift::UserException] if the app is not scalable or already
#   at its minimum gear count
# @raise [OpenShift::NodeException] if the component/group cannot be found
def scaledown(comp_name=nil)
  result_io = ResultIO.new
  if not self.scalable
    raise OpenShift::UserException.new("Cannot scale a non-scalable application", 255, result_io)
  end
  comp_name = "web" if comp_name.nil?
  prof = @profile_name_map[@default_profile]
  cinst = ComponentInstance::find_component_in_cart(prof, self, comp_name, self.get_name_prefix)
  raise OpenShift::NodeException.new("Cannot find #{comp_name} in app #{self.name}.", 1, result_io) if cinst.nil?
  ginst = self.group_instance_map[cinst.group_instance_name]
  raise OpenShift::NodeException.new("Cannot find group #{cinst.group_instance_name} for #{comp_name} in app #{self.name}.", 1, result_io) if ginst.nil?
  # remove any gear out of this ginst
  raise OpenShift::UserException.new("Cannot scale below minimum gear requirements", 1, result_io) if ginst.gears.length <= ginst.min

  # Always drop the last gear in the list (LIFO removal).
  gear = ginst.gears.last

  dns = OpenShift::DnsService.instance
  begin
    dns.deregister_application(gear.name, @domain.namespace)
    dns.publish
  ensure
    dns.close
  end

  result_io.append ginst.remove_gear(gear)

  # inform anyone who needs to know that this gear is no more
  self.configure_dependencies
  self.execute_connections
  result_io
end
|
440
|
+
|
441
|
+
# Elaborates the descriptor, configures cartridges that were added to the application dependencies.
|
442
|
+
# If a node is empty after removing components, then the gear is destroyed. Errors that occur while removing cartridges are logged but no exception is thrown.
|
443
|
+
# If an error occurs while configuring a cartridge, then the cartirdge is deconfigures on all nodes and an exception is thrown.
|
444
|
+
# Elaborates the descriptor, configures cartridges that were added to the application dependencies.
# If a node is empty after removing components, then the gear is destroyed. Errors that occur while removing cartridges are logged but no exception is thrown.
# If an error occurs while configuring a cartridge, then the cartridge is deconfigured on all nodes and an exception is thrown.
#
# @return [ResultIO] accumulated output from all gear operations
# @raise the first exception collected during configuration, after
#   rollback (deconfigure + unused-gear cleanup) has been attempted
def configure_dependencies
  reply = ResultIO.new
  self.class.notify_observers(:before_application_configure, {:application => self, :reply => reply})

  elaborate_descriptor

  exceptions = []
  Rails.logger.debug "Configure order is #{self.configure_order.inspect}"
  #process new additions
  #TODO: fix configure after framework cartridge is no longer a requirement for adding embedded cartridges
  self.configure_order.each do |comp_inst_name|
    comp_inst = self.comp_instance_map[comp_inst_name]
    # Skip the application's own pseudo-component.
    next if comp_inst.parent_cart_name == self.name
    group_inst = self.group_instance_map[comp_inst.group_instance_name]
    begin
      group_inst.fulfil_requirements(self)
      run_on_gears(group_inst.get_unconfigured_gears(comp_inst), reply) do |gear, r|
        doExpose = false
        # Only expose ports for non-proxy components on gears that have
        # not configured this component yet.
        if self.scalable and comp_inst.parent_cart_name!=self.proxy_cartridge
          doExpose = true if not gear.configured_components.include? comp_inst.name
        end
        r.append gear.configure(comp_inst, @init_git_url)
        begin
          r.append gear.expose_port(comp_inst) if doExpose
        # NOTE(review): expose_port failures are deliberately swallowed here
        # (best-effort); `rescue Exception` is broad — confirm intent.
        rescue Exception=>e
        end
        process_cartridge_commands(r)
      end
    rescue Exception => e
      Rails.logger.debug e.message
      Rails.logger.debug e.backtrace.inspect

      if e.kind_of?(OpenShift::GearsException)
        successful_gears = []
        successful_gears = e.successful.map{|g| g[:gear]} if e.successful
        failed_gears = []
        failed_gears = e.failed.map{|g| g[:gear]} if e.failed
        gear_exception = e.exception

        #remove failed component from all gears
        run_on_gears(successful_gears, reply, false) do |gear, r|
          r.append gear.deconfigure(comp_inst)
          process_cartridge_commands(r)
        end
        # Failed gears get a forced deconfigure (second arg true).
        run_on_gears(failed_gears, reply, false) do |gear, r|
          r.append gear.deconfigure(comp_inst, true)
          process_cartridge_commands(r)
        end
      else
        gear_exception = e
      end

      # destroy any unused gears
      # TODO : if the destroy fails below... the user still sees the error as configure failure
      # Then to recover, if we re-elaborate (like in add_dependency), then the group instance will get lost
      # and any failed gears below will leak (i.e. they exist on node, their destroy failed, but they do not have any handle in Mongo)
      run_on_gears(group_inst.gears, reply, false) do |gear, r|
        r.append group_inst.remove_gear(gear) if gear.configured_components.length == 0
      end

      self.save
      exceptions << gear_exception
    end
  end

  # Only the first collected exception is surfaced to the caller.
  unless exceptions.empty?
    raise exceptions.first
  end

  self.save
  self.class.notify_observers(:after_application_configure, {:application => self, :reply => reply})
  reply
end
|
517
|
+
|
518
|
+
# Runs every publisher/subscriber connection of a scalable application:
# for each connection endpoint, executes the publishing connector on all
# gears of the publishing group, collects their outputs, then feeds the
# shell-escaped aggregate to the subscribing connector on all gears of
# the subscribing group. No-op for non-scalable applications.
def execute_connections
  return if not self.scalable

  self.conn_endpoints_list.each { |conn|
    pub_inst = self.comp_instance_map[conn.from_comp_inst]
    pub_ginst = self.group_instance_map[pub_inst.group_instance_name]

    tag = ""
    handle = RemoteJob.create_parallel_job
    RemoteJob.run_parallel_on_gears(pub_ginst.gears, handle) { |exec_handle, gear|
      appname = gear.name
      connector_name = conn.from_connector.name
      cart = pub_inst.parent_cart_name
      input_args = [appname, self.domain.namespace, gear.uuid]

      job = gear.get_execute_connector_job(cart, connector_name, input_args)
      RemoteJob.add_parallel_job(exec_handle, tag, gear, job)
    }
    pub_out = []
    RemoteJob.get_parallel_run_results(handle) { |tag, gear, output, status|
      # Only successful publisher runs contribute to the subscriber input.
      if status==0
        pub_out.push("'#{gear}'='#{output}'")
      end
    }
    input_to_subscriber = Shellwords::shellescape(pub_out.join(' '))
    Rails.logger.debug "Output of publisher - '#{pub_out}'"

    sub_inst = self.comp_instance_map[conn.to_comp_inst]
    sub_ginst = self.group_instance_map[sub_inst.group_instance_name]
    handle = RemoteJob.create_parallel_job
    RemoteJob.run_parallel_on_gears(sub_ginst.gears, handle) { |exec_handle, gear|
      appname = gear.name
      connector_name = conn.to_connector.name
      cart = sub_inst.parent_cart_name
      input_args = [appname, self.domain.namespace, gear.uuid, input_to_subscriber]

      job = gear.get_execute_connector_job(cart, connector_name, input_args)
      RemoteJob.add_parallel_job(exec_handle, tag, gear, job)
    }
    # we dont care about subscriber's output/status
  }
end
|
560
|
+
|
561
|
+
# Start a particular dependency on all gears that host it.
|
562
|
+
# If unable to start a component, the application is stopped on all gears
|
563
|
+
# @param [String] dependency Name of a cartridge to start. Set to nil for all dependencies.
|
564
|
+
# @param [Boolean] force_stop_on_failure
|
565
|
+
# Start a particular dependency on all gears that host it.
# If unable to start a component, the application is stopped on all gears
# @param [String] dependency Name of a cartridge to start. Set to nil for all dependencies.
# @param [Boolean] stop_on_failure stop the app before re-raising when a gear fails to start
# @return [ResultIO]
def start(dependency=nil, stop_on_failure=true)
  reply = ResultIO.new
  self.class.notify_observers(:before_start, {:application => self, :reply => reply, :dependency => dependency})
  self.start_order.each do |comp_inst_name|
    comp_inst = self.comp_instance_map[comp_inst_name]
    next if !dependency.nil? and (comp_inst.parent_cart_name != dependency)
    next if comp_inst.parent_cart_name == self.name

    begin
      group_inst = self.group_instance_map[comp_inst.group_instance_name]
      run_on_gears(group_inst.gears, reply) do |gear, r|
        r.append gear.start(comp_inst)
      end
    rescue Exception => e
      # NOTE(review): this assumes the raised exception's #message responds
      # to [:exception] (i.e. run_on_gears raises with a Hash-like message);
      # a plain StandardError here would make String#[:exception] raise
      # TypeError — confirm against run_on_gears.
      gear_exception = e.message[:exception]
      self.stop(dependency,false,false) if stop_on_failure
      raise gear_exception
    end
  end
  self.class.notify_observers(:after_start, {:application => self, :reply => reply, :dependency => dependency})
  reply
end
|
587
|
+
|
588
|
+
# Stop a particular dependency on all gears that host it.
|
589
|
+
# @param [String] dependency Name of a cartridge to start. Set to nil for all dependencies.
|
590
|
+
# @param [Boolean] force_stop_on_failure
|
591
|
+
# @param [Boolean] throw_exception_on_failure
|
592
|
+
# Stop a particular dependency on all gears that host it, in reverse
# start order.
# @param [String] dependency Name of a cartridge to stop. Set to nil for all dependencies.
# @param [Boolean] force_stop_on_failure escalate to force_stop when a gear fails
# @param [Boolean] throw_exception_on_failure re-raise the first gear failure
# @return [ResultIO]
def stop(dependency=nil, force_stop_on_failure=true, throw_exception_on_failure=true)
  reply = ResultIO.new
  self.class.notify_observers(:before_stop, {:application => self, :reply => reply, :dependency => dependency})
  self.start_order.reverse.each do |comp_inst_name|
    comp_inst = self.comp_instance_map[comp_inst_name]
    next if !dependency.nil? and (comp_inst.parent_cart_name != dependency)
    next if comp_inst.parent_cart_name == self.name

    group_inst = self.group_instance_map[comp_inst.group_instance_name]
    # Third arg false: collect failures instead of raising immediately.
    s,f = run_on_gears(group_inst.gears, reply, false) do |gear, r|
      r.append gear.stop(comp_inst)
    end

    if(f.length > 0)
      self.force_stop(dependency,false) if(force_stop_on_failure)
      raise f[0][:exception] if(throw_exception_on_failure)
    end
  end
  self.class.notify_observers(:after_stop, {:application => self, :reply => reply, :dependency => dependency})
  reply
end
|
613
|
+
|
614
|
+
# Force stop a particular dependency on all gears that host it.
|
615
|
+
# @param [String] dependency Name of a cartridge to stop. Set to nil for all dependencies.
|
616
|
+
# @param [Boolean] throw_exception_on_failure
|
617
|
+
# Force stop a particular dependency on all gears that host it.
# @param [String] dependency Name of a cartridge to stop. Set to nil for all dependencies.
# @param [Boolean] throw_exception_on_failure re-raise the first gear failure
# @return [ResultIO]
def force_stop(dependency=nil, throw_exception_on_failure=true)
  reply = ResultIO.new
  self.class.notify_observers(:before_force_stop, {:application => self, :reply => reply, :dependency => dependency})
  self.start_order.each do |comp_inst_name|
    comp_inst = self.comp_instance_map[comp_inst_name]
    next if !dependency.nil? and (comp_inst.parent_cart_name != dependency)

    group_inst = self.group_instance_map[comp_inst.group_instance_name]
    s,f = run_on_gears(group_inst.gears, reply, false) do |gear, r|
      r.append gear.force_stop(comp_inst)
    end

    raise f[0][:exception] if(f.length > 0 and throw_exception_on_failure)
  end
  self.class.notify_observers(:after_force_stop, {:application => self, :reply => reply, :dependency => dependency})
  reply
end
|
634
|
+
|
635
|
+
# Restart a particular dependency on all gears that host it.
|
636
|
+
# @param [String] dependency Name of a cartridge to restart. Set to nil for all dependencies.
|
637
|
+
# Restart a particular dependency on all gears that host it.
# @param [String] dependency Name of a cartridge to restart. Set to nil for all dependencies.
# @return [ResultIO]
def restart(dependency=nil)
  reply = ResultIO.new
  self.class.notify_observers(:before_restart, {:application => self, :reply => reply, :dependency => dependency})
  self.start_order.each do |inst_name|
    instance = self.comp_instance_map[inst_name]
    next unless dependency.nil? or instance.parent_cart_name == dependency

    grp = self.group_instance_map[instance.group_instance_name]
    successes, failures = run_on_gears(grp.gears, reply, false) do |gear, r|
      r.append gear.restart(instance)
    end

    raise failures.first[:exception] unless failures.empty?
  end
  self.class.notify_observers(:after_restart, {:application => self, :reply => reply, :dependency => dependency})
  reply
end
|
654
|
+
|
655
|
+
# Reload a particular dependency on all gears that host it.
|
656
|
+
# @param [String] dependency Name of a cartridge to reload. Set to nil for all dependencies.
|
657
|
+
# Reload a particular dependency on all gears that host it.
# @param [String] dependency Name of a cartridge to reload. Defaults to the framework cartridge.
# @return [ResultIO]
def reload(dependency=nil)
  dependency = self.framework if dependency.nil?
  reply = ResultIO.new
  self.class.notify_observers(:before_reload, {:application => self, :reply => reply, :dependency => dependency})
  self.start_order.each do |inst_name|
    instance = self.comp_instance_map[inst_name]
    next unless dependency.nil? or instance.parent_cart_name == dependency

    grp = self.group_instance_map[instance.group_instance_name]
    successes, failures = run_on_gears(grp.gears, reply, false) do |gear, r|
      r.append gear.reload(instance)
    end

    raise failures.first[:exception] unless failures.empty?
  end
  self.class.notify_observers(:after_reload, {:application => self, :reply => reply, :dependency => dependency})
  reply
end
|
675
|
+
|
676
|
+
# Retrieves status for a particular dependency on all gears that host it.
|
677
|
+
# @param [String] dependency Name of a cartridge
|
678
|
+
# Retrieves status for a particular dependency on all gears that host it,
# using a single parallel remote job batch.
# @param [String] dependency Name of a cartridge; nil means all.
# @param [Boolean] ret_reply when true return a ResultIO, otherwise an
#   array of {"gear_id" => ..., "message" => ...} hashes
# @raise [OpenShift::UserException] if any gear reports a non-zero status
def status(dependency=nil, ret_reply=true)
  reply = ResultIO.new
  app_status = []
  tag = ""
  handle = RemoteJob.create_parallel_job

  self.comp_instance_map.each do |comp_inst_name, comp_inst|
    next if !dependency.nil? and (comp_inst.parent_cart_name != dependency)
    next if comp_inst.parent_cart_name == self.name

    group_inst = self.group_instance_map[comp_inst.group_instance_name]
    group_inst.gears.each do |gear|
      job = gear.status_job(comp_inst)
      RemoteJob.add_parallel_job(handle, tag, gear, job)
    end
  end
  if RemoteJob.has_jobs(handle)
    # Gear list is empty because each job already carries its gear.
    RemoteJob.run_parallel_on_gears([], handle) { }
    RemoteJob.get_parallel_run_results(handle) { |tag, gear, output, rc|
      if rc != 0
        Rails.logger.error "Error: Getting '#{dependency}' status from gear '#{gear}', errcode: '#{rc}' and output: #{output}"
        raise OpenShift::UserException.new("Error: Getting '#{dependency}' status from gear '#{gear}', errcode: '#{rc}' and output: #{output}", 143)
      else
        r = ResultIO.new
        r.resultIO << "#{output}\n"
        reply.append r
        app_status.push({"gear_id" => gear, "message" => output}) unless ret_reply
      end
    }
  end
  if ret_reply
    return reply
  else
    return app_status
  end
end
|
714
|
+
|
715
|
+
# Invokes tidy for a particular dependency on all gears that host it.
|
716
|
+
# @param [String] dependency Name of a cartridge
|
717
|
+
# Invokes tidy for a particular dependency on all gears that host it.
# @param [String] dependency Name of a cartridge. Defaults to the framework cartridge.
# @return [ResultIO]
def tidy(dependency=nil)
  dependency = self.framework if dependency.nil?
  reply = ResultIO.new
  self.comp_instance_map.each do |_inst_name, instance|
    next unless dependency.nil? or instance.parent_cart_name == dependency

    grp = self.group_instance_map[instance.group_instance_name]
    successes, failures = run_on_gears(grp.gears, reply, false) do |gear, r|
      r.append gear.tidy(instance)
    end

    raise failures.first[:exception] unless failures.empty?
  end
  reply
end
|
732
|
+
|
733
|
+
# Invokes threaddump for a particular dependency on all gears that host it.
|
734
|
+
# @param [String] dependency Name of a cartridge
|
735
|
+
# Invokes threaddump for a particular dependency on all gears that host it.
# @param [String] dependency Name of a cartridge; nil means all.
# @return [ResultIO]
def threaddump(dependency=nil)
  reply = ResultIO.new
  self.comp_instance_map.each do |_inst_name, instance|
    next unless dependency.nil? or instance.parent_cart_name == dependency

    grp = self.group_instance_map[instance.group_instance_name]
    successes, failures = run_on_gears(grp.gears, reply, false) do |gear, r|
      r.append gear.threaddump(instance)
    end

    raise failures.first[:exception] unless failures.empty?
  end
  reply
end
|
749
|
+
|
750
|
+
# Invokes system_messages for a particular dependency on all gears that host it.
|
751
|
+
# @param [String] dependency Name of a cartridge
|
752
|
+
# Invokes system_messages for a particular dependency on all gears that host it.
# @param [String] dependency Name of a cartridge; nil means all.
# @return [ResultIO]
def system_messages(dependency=nil)
  reply = ResultIO.new
  self.comp_instance_map.each do |_inst_name, instance|
    next unless dependency.nil? or instance.parent_cart_name == dependency

    grp = self.group_instance_map[instance.group_instance_name]
    successes, failures = run_on_gears(grp.gears, reply, false) do |gear, r|
      r.append gear.system_messages(instance)
    end

    raise failures.first[:exception] unless failures.empty?
  end
  reply
end
|
766
|
+
|
767
|
+
# Invokes expose_port for a particular dependency on all gears that host it.
|
768
|
+
# @param [String] dependency Name of a cartridge
|
769
|
+
# Invokes expose_port for a particular dependency on all gears that host it.
# Proxy and application pseudo-components are skipped.
# @param [String] dependency Name of a cartridge; nil means all.
# @return [ResultIO]
def expose_port(dependency=nil)
  reply = ResultIO.new
  self.comp_instance_map.each do |_inst_name, instance|
    next unless dependency.nil? or instance.parent_cart_name == dependency
    next if instance.name == "@@app"
    next if instance.parent_cart_name == self.name
    next if instance.parent_cart_name == self.proxy_cartridge

    grp = self.group_instance_map[instance.group_instance_name]
    successes, failures = run_on_gears(grp.gears, reply, false) do |gear, r|
      r.append gear.expose_port(instance)
    end

    # Not all cartridges will have this hook: exit code 127 ("hook not
    # found") is tolerated, anything else is re-raised.
    failures.each do |failure|
      next if failure[:exception].resultIO.exitcode == 127
      raise failure[:exception]
    end

  end
  reply
end
|
791
|
+
|
792
|
+
# Invokes conceal_port for a particular dependency on all gears that host
# it. Proxy and application pseudo-components are skipped.
# @param [String] dependency Name of a cartridge; nil means all.
# @return [ResultIO]
def conceal_port(dependency=nil)
  reply = ResultIO.new
  self.comp_instance_map.each do |_inst_name, instance|
    next unless dependency.nil? or instance.parent_cart_name == dependency
    next if instance.name == "@@app"
    next if instance.parent_cart_name == self.name
    next if instance.parent_cart_name == self.proxy_cartridge

    grp = self.group_instance_map[instance.group_instance_name]
    successes, failures = run_on_gears(grp.gears, reply, false) do |gear, r|
      r.append gear.conceal_port(instance)
    end

    # Not all cartridges will have this hook: exit code 127 ("hook not
    # found") is tolerated, anything else is re-raised.
    failures.each do |failure|
      next if failure[:exception].resultIO.exitcode == 127
      raise failure[:exception]
    end

  end
  reply
end
|
814
|
+
|
815
|
+
# Invokes show_port for a particular dependency on all gears that host
# it. Proxy and application pseudo-components are skipped.
# @param [String] dependency Name of a cartridge; nil means all.
# @return [ResultIO]
def show_port(dependency=nil)
  reply = ResultIO.new
  self.comp_instance_map.each do |_inst_name, instance|
    next unless dependency.nil? or instance.parent_cart_name == dependency
    next if instance.name == "@@app"
    next if instance.parent_cart_name == self.name
    next if instance.parent_cart_name == self.proxy_cartridge

    Rails.logger.debug( instance.inspect )
    Rails.logger.debug( "\n" )

    grp = self.group_instance_map[instance.group_instance_name]
    successes, failures = run_on_gears(grp.gears, reply, false) do |gear, r|
      r.append gear.show_port(instance)
    end

    # Not all cartridges will have this hook: exit code 127 ("hook not
    # found") is tolerated, anything else is re-raised.
    failures.each do |failure|
      next if failure[:exception].resultIO.exitcode == 127
      raise failure[:exception]
    end

  end
  reply
end
|
840
|
+
|
841
|
+
# Get the state of the application on all gears.
|
842
|
+
# Get the state of the application on all gears.
# Runs the app-state job on every gear in parallel; a gear whose job
# fails is reported as 'unknown'.
# @return [Hash] gear identifier => state string (or 'unknown')
def show_state()
  gear_states = {}
  tag = ""
  handle = RemoteJob.create_parallel_job
  RemoteJob.run_parallel_on_gears(self.gears, handle) { |exec_handle, gear|
    job = gear.app_state_job_show()
    RemoteJob.add_parallel_job(exec_handle, tag, gear, job)
  }
  RemoteJob.get_parallel_run_results(handle) { |tag, gear, output, status|
    if status != 0
      Rails.logger.error("Error getting application state from gear: '#{gear}' with status: '#{status}' and output: #{output}")
      gear_states[gear] = 'unknown'
    else
      gear_states[gear] = output
    end
  }
  gear_states
end
|
860
|
+
|
861
|
+
# Pushes user-level settings (environment variables, user SSH keys,
# system SSH keys) plus the application's own SSH keys onto the given
# gears via one parallel job batch.
# @param [Array] gears Gears to update; defaults to all gears of the app.
# @return [ResultIO] (always empty; failures raise instead)
# @raise [OpenShift::NodeException] if any gear reports a non-zero status
def add_node_settings(gears=nil)
  reply = ResultIO.new

  gears = self.gears unless gears

  self.ssh_keys = {} unless self.ssh_keys
  if @user.env_vars || @user.ssh_keys || @user.system_ssh_keys
    tag = ""
    handle = RemoteJob.create_parallel_job
    RemoteJob.run_parallel_on_gears(gears, handle) { |exec_handle, gear|
      @user.env_vars.each do |key, value|
        job = gear.env_var_job_add(key, value)
        RemoteJob.add_parallel_job(exec_handle, tag, gear, job)
      end if @user.env_vars
      @user.ssh_keys.each do |key_name, key_info|
        job = gear.ssh_key_job_add(key_info["key"], key_info["type"], key_name)
        RemoteJob.add_parallel_job(exec_handle, tag, gear, job)
      end if @user.ssh_keys
      # System keys carry no type (second arg nil).
      @user.system_ssh_keys.each do |key_name, key_info|
        job = gear.ssh_key_job_add(key_info, nil, key_name)
        RemoteJob.add_parallel_job(exec_handle, tag, gear, job)
      end if @user.system_ssh_keys
      self.ssh_keys.each do |key_name, key_info|
        job = gear.ssh_key_job_add(key_info, nil, key_name)
        RemoteJob.add_parallel_job(exec_handle, tag, gear, job)
      end
    }
    RemoteJob.get_parallel_run_results(handle) { |tag, gear, output, status|
      if status != 0
        raise OpenShift::NodeException.new("Error applying settings to gear: #{gear} with status: #{status} and output: #{output}", 143)
      end
    }
  end
  reply
end
|
896
|
+
|
897
|
+
# Registers a DNS record mapping the application name in the given
# namespace to the node's public hostname, publishing the change and
# always releasing the DNS service handle.
# @param [String] appname
# @param [String] namespace
# @param [String] public_hostname
def add_dns(appname, namespace, public_hostname)
  dns = OpenShift::DnsService.instance
  begin
    dns.register_application(appname, namespace, public_hostname)
    dns.publish
  ensure
    dns.close
  end
end
|
906
|
+
|
907
|
+
# Creates the application's DNS entry, pointing at the public hostname
# of the node container that hosts it. Observers are notified before and
# after the registration.
# @return [ResultIO]
def create_dns
  reply = ResultIO.new
  self.class.notify_observers(:before_create_dns, {:application => self, :reply => reply})

  add_dns(@name, @domain.namespace, self.container.get_public_hostname)

  self.class.notify_observers(:after_create_dns, {:application => self, :reply => reply})
  reply
end
|
917
|
+
|
918
|
+
# Removes the application's DNS entry and, for scalable applications,
# the per-gear DNS entries of every group instance. The DNS handle is
# always closed, even on failure.
# @return [ResultIO]
def destroy_dns
  reply = ResultIO.new
  self.class.notify_observers(:before_destroy_dns, {:application => self, :reply => reply})
  dns = OpenShift::DnsService.instance
  begin
    dns.deregister_application(@name,@domain.namespace)
    if self.scalable
      # find the group instance where the web-cartridge is residing
      self.group_instance_map.keys.each { |ginst_name|
        ginst = self.group_instance_map[ginst_name]
        ginst.gears.each { |gear|
          dns.deregister_application(gear.name,@domain.namespace)
        }
      }
    end
    dns.publish
  ensure
    dns.close
  end
  self.class.notify_observers(:after_destroy_dns, {:application => self, :reply => reply})
  reply
end
|
940
|
+
|
941
|
+
# Re-points the application's existing DNS entry at the current node
# container's public hostname (e.g. after a move). The DNS handle is
# always closed.
# @return [ResultIO]
def recreate_dns
  reply = ResultIO.new
  self.class.notify_observers(:before_recreate_dns, {:application => self, :reply => reply})
  dns = OpenShift::DnsService.instance
  begin
    public_hostname = self.container.get_public_hostname
    dns.modify_application(@name, @domain.namespace, public_hostname)
    dns.publish
  ensure
    dns.close
  end
  self.class.notify_observers(:after_recreate_dns, {:application => self, :reply => reply})
  reply
end
|
955
|
+
|
956
|
+
# Aggregates scaling limits over the group instances named in
# cart_group_map. -1 acts as an "unbounded" sentinel for maxima: once any
# group's max (or supported_max) is -1, the aggregate max stays -1.
# @param [Hash] cart_group_map group name => component instance list
# @return [Array] current min, current max, supported min, supported max
def get_user_min_max(cart_group_map)
  sup_min = 0
  sup_max = nil
  cart_current_min = 0
  cart_current_max = nil
  cart_group_map.each do |group_name, component_instance_list|
    ginst = self.group_instance_map[group_name]
    sup_min += ginst.supported_min
    cart_current_min += ginst.min
    # First group initializes sup_max; a -1 anywhere makes it -1 for good.
    if sup_max.nil? or ginst.supported_max==-1
      sup_max = ginst.supported_max
    else
      sup_max += ginst.supported_max unless sup_max==-1
    end
    if cart_current_max.nil? or ginst.max==-1
      cart_current_max = ginst.max
    else
      cart_current_max += ginst.max unless cart_current_max==-1
    end
  end
  return cart_current_min, cart_current_max, sup_min, sup_max
end
|
978
|
+
|
979
|
+
# Applies user-requested scaling bounds to the groups in cart_group_map.
# If min > max the two values are silently swapped (see commented-out
# raise below). Ordering matters: min is applied first only when it is
# shrinking, so the intermediate state never violates min <= max.
# After adjusting, the proxy cartridge is reloaded on scalable apps so it
# picks up the new limits.
# @param [Hash] cart_group_map group name => component instance list
# @param [Integer, String, nil] min_scale requested scales_from
# @param [Integer, String, nil] max_scale requested scales_to (-1 = unbounded)
def set_user_min_max(cart_group_map, min_scale, max_scale)
  if min_scale and max_scale and Integer(min_scale) > Integer(max_scale) and Integer(max_scale)!=-1
    #raise OpenShift::UserException.new("Invalid scaling factors provided. Minimum (#{min_scale}) should always be less than maximum (#{max_scale}).", 170)
    tmp = min_scale
    min_scale = max_scale
    max_scale = tmp
  end
  cart_current_min, cart_current_max, sup_min, sup_max = get_user_min_max(cart_group_map)
  if min_scale and Integer(min_scale)-cart_current_min<0
    # set min first
    set_user_min(cart_group_map, min_scale)
    set_user_max(cart_group_map, max_scale)
  else
    set_user_max(cart_group_map, max_scale)
    set_user_min(cart_group_map, min_scale)
  end

  if self.scalable
    prof = @profile_name_map[@default_profile]
    cinst = ComponentInstance::find_component_in_cart(prof, self, self.proxy_cartridge, self.get_name_prefix)
    if cinst
      group_inst = self.group_instance_map[cinst.group_instance_name]
      reply = ResultIO.new
      # Reload the proxy on every gear; failures are collected, not raised.
      s,f = run_on_gears(group_inst.gears, reply, false) do |gear, r|
        gear.reload(cinst)
      end
    end
  end

end
|
1009
|
+
|
1010
|
+
# Distributes a user-requested minimum scale (scales_from) across the
# group instances in cart_group_map. The delta between the requested and
# current aggregate minimum is pushed group by group: increases are
# capped by each group's max, decreases by its supported_min.
# @param [Hash] cart_group_map group name => component instance list
# @param [Integer, String, nil] min_scale requested minimum; nil is a no-op
# @raise [OpenShift::UserException] if the value is out of range (168) or
#   could not be fully distributed (169)
def set_user_min(cart_group_map, min_scale)
  return if not min_scale
  cart_current_min, cart_current_max, sup_min, sup_max = get_user_min_max(cart_group_map)
  # 1000000 stands in for the -1 "unbounded" sentinel during arithmetic.
  cart_current_max = 1000000 if cart_current_max==-1
  if (Integer(min_scale) < sup_min or Integer(min_scale) > cart_current_max)
    raise OpenShift::UserException.new("Invalid scales_from factor #{min_scale} provided. Value out of allowed range ( #{sup_min} : #{cart_current_max==1000000 ? -1 : cart_current_max} ).", 168)
  end
  target_min = Integer(min_scale) - cart_current_min
  iter = cart_group_map.keys.each
  while target_min != 0 do
    begin
      group_name = iter.next
      break if group_name.nil?
    # External iterators raise StopIteration when exhausted; treat as done.
    rescue Exception=>e
      break
    end
    ginst = self.group_instance_map[group_name]
    ginst_max = ginst.max
    ginst_max = 1000000 if ginst.max==-1
    if target_min > 0
      # Growing: give this group as much of the remaining delta as its
      # max allows.
      if (ginst_max-ginst.min)>target_min
        ginst.min += target_min
        target_min = 0
      else
        target_min -= (ginst_max-ginst.min)
        ginst.min = ginst_max
      end
    else
      # Shrinking: lower this group's min down to its supported_min.
      if (ginst.supported_min-ginst.min) < target_min
        ginst.min += target_min
        target_min = 0
      else
        target_min += (ginst.min-ginst.supported_min)
        ginst.min = ginst.supported_min
      end
    end
  end
  self.save
  if target_min != 0
    raise OpenShift::UserException.new("Could not completely distribute scales_from to all groups. Value constrained to #{Integer(min_scale)-target_min}", 169)
  end
end
|
1052
|
+
|
1053
|
+
# Applies a user-requested maximum scale (scales_to) across the group
# instances in cart_group_map. -1 means "unbounded" and is assigned to
# the first group whose supported_max is also unbounded. Otherwise the
# first group able to absorb the target receives target_max minus the
# sum of all other groups' maxima.
# @param [Hash] cart_group_map group name => component instance list
# @param [Integer, String, nil] max_scale requested maximum; nil is a no-op
# @raise [OpenShift::UserException] if the value is out of range (168)
def set_user_max(cart_group_map, max_scale)
  return if not max_scale
  cart_current_min, cart_current_max, sup_min, sup_max = get_user_min_max(cart_group_map)
  # 1000000 stands in for the -1 "unbounded" sentinel during arithmetic.
  sup_max = 1000000 if sup_max==-1
  max_scale_int = Integer(max_scale)
  max_scale_int = 1000000 if max_scale_int==-1
  if (max_scale_int and ( max_scale_int > sup_max or max_scale_int < cart_current_min) )
    raise OpenShift::UserException.new("Invalid scales_to factor #{max_scale} provided. Value out of allowed range ( #{cart_current_min} : #{sup_max==1000000 ? -1 : sup_max} ).", 168)
  end
  target_max = Integer(max_scale)
  # FIX: Hash#keys yields plain group names; the previous
  # |group_name, component_instances| signature left the second block
  # parameter permanently nil.
  cart_group_map.keys.each { |group_name|
    gi = self.group_instance_map[group_name]
    if target_max==-1
      # Unbounded request goes to the first group that supports it.
      next if gi.supported_max!=-1
      gi.max = target_max
      break
    end
    if gi.supported_max==-1 or( (gi.supported_max-gi.min) > target_max )
      rest_total = 0
      cart_group_map.keys.each { |other_group_name|
        next if other_group_name==group_name
        other_gi = self.group_instance_map[other_group_name]
        if other_gi.max == -1
          # FIX: was `other_gi.max==other_gi.min`, a discarded comparison;
          # the intent is to pin an unbounded max to the group's min so the
          # rest_total sum below is meaningful (otherwise -1 was added).
          other_gi.max = other_gi.min
        end
        rest_total += other_gi.max
      }
      gi.max = (target_max-rest_total)
      break
    end
  }
  self.save
end
|
1086
|
+
|
1087
|
+
# Prepares every gear of the application for a domain namespace rename.
# Best-effort: any gear failure (non-zero exit or raised exception) marks
# the overall operation unsuccessful but output is still accumulated.
# @param dns_service DNS service handle passed through to each gear
# @param [String] new_ns the new namespace
# @param [String] old_ns the namespace being replaced
# @return [Hash] { :success => Boolean, :result_io => ResultIO }
def prepare_namespace_update(dns_service, new_ns, old_ns)
  updated = true
  result_io = ResultIO.new
  begin
    self.gears.each do |gear|
      gear_result_io = gear.prepare_namespace_update(dns_service, new_ns, old_ns)
      updated = false unless gear_result_io.exitcode == 0
      result_io.append gear_result_io
    end
  # NOTE(review): `rescue Exception` also swallows SignalException /
  # SystemExit; StandardError is normally sufficient — confirm intent.
  rescue Exception => e
    updated = false
    Rails.logger.debug "Exception caught updating namespace: #{e.message}"
    Rails.logger.debug e.backtrace
    result_io.append e.resultIO if e.respond_to?('resultIO') and e.resultIO
  end
  return { :success => updated, :result_io => result_io }
end
|
1104
|
+
|
1105
|
+
# Finishes a domain namespace rename: rewrites the old "-<ns>.<suffix>"
# substring in cartridge properties and embedded cartridge info, then
# re-elaborates the descriptor and re-runs connections (they embed the
# namespace and must be renewed) before persisting the new namespace.
# @param [String] new_ns the new namespace
# @param [String] old_ns the namespace being replaced
def complete_namespace_update(new_ns, old_ns)
  suffix = Rails.configuration.openshift[:domain_suffix]
  # FIX: the previous pattern interpolated old_ns and the suffix into the
  # regex unescaped and used a bare "." separator, so any regex
  # metacharacter (including each "." of the suffix) matched arbitrary
  # characters. Escape both parts and the separator.
  old_pattern = /-#{Regexp.escape(old_ns)}\.#{Regexp.escape(suffix)}/
  replacement = "-#{new_ns}.#{suffix}"

  self.comp_instances.each do |comp_inst|
    comp_inst.cart_properties.each do |prop_key, prop_value|
      comp_inst.cart_properties[prop_key] = prop_value.gsub(old_pattern, replacement)
    end
  end
  self.embedded.each_key do |framework|
    if self.embedded[framework].has_key?('info')
      info = self.embedded[framework]['info']
      info.gsub!(old_pattern, replacement)
      self.embedded[framework]['info'] = info
    end
  end

  # elaborate descriptor again to execute connections, because connections need to be renewed
  self.elaborate_descriptor
  self.execute_connections
  self.domain.namespace = new_ns
  self.save
end
|
1125
|
+
|
1126
|
+
# Adds a custom HTTP server alias (vanity domain) to the application,
# validating it, persisting it, and configuring it on the node. On any node
# failure the alias is rolled back (removed from node and model) and the
# exception re-raised.
#
# @param [String] t_server_alias alias to add (case-insensitive)
# @return [ResultIO] aggregated node output
# @raise [OpenShift::UserException] 105 on invalid alias, 255 on duplicate
def add_alias(t_server_alias)
  # Server aliases validate as DNS host names in accordance with RFC
  # 1123 and RFC 952. Additionally, OpenShift does not allow an
  # Alias to be an IP address or a host in the service domain.
  # Since DNS is case insensitive, all names are downcased for
  # indexing/compares.
  server_alias = t_server_alias.downcase
  # NOTE(review): domain_suffix is interpolated unescaped below, so '.' in the
  # suffix matches any character; the prior \A..\z character-class check keeps
  # this from matching newlines, but Regexp.escape would be safer — confirm.
  if !(server_alias =~ /\A[0-9a-zA-Z\-\.]+\z/) or
     (server_alias =~ /#{Rails.configuration.openshift[:domain_suffix]}$/) or
     (server_alias.length > 255 ) or
     (server_alias.length == 0 ) or
     (server_alias =~ /^\d+\.\d+\.\d+\.\d+$/)
    raise OpenShift::UserException.new("Invalid Server Alias '#{t_server_alias}' specified", 105)
  end

  self.aliases = [] unless self.aliases
  raise OpenShift::UserException.new("Alias '#{server_alias}' already exists for '#{@name}'", 255) if self.aliases.include? server_alias
  reply = ResultIO.new
  begin
    # Persist first so a broker crash mid-configure still records the alias,
    # then configure it on the node.
    self.aliases.push(server_alias)
    self.save
    reply.append self.container.add_alias(self, self.gear, server_alias)
  rescue Exception => e
    Rails.logger.debug e.message
    Rails.logger.debug e.backtrace.inspect
    # Roll back: best-effort removal on the node, then undo the model change.
    reply.append self.container.remove_alias(self, self.gear, server_alias)
    self.aliases.delete(server_alias)
    self.save
    raise
  end
  reply
end
|
1158
|
+
|
1159
|
+
# Removes an HTTP server alias from the application: deconfigures it on the
# node, then drops it from the model.
#
# @param [String] t_server_alias alias to remove (case-insensitive)
# @return [ResultIO] aggregated node output
# @raise [OpenShift::UserException] 255 if the alias is not registered
def remove_alias(t_server_alias)
  server_alias = t_server_alias.downcase
  self.aliases = [] unless self.aliases
  reply = ResultIO.new
  begin
    # Node-side removal happens even if the alias is unknown to the model;
    # the model check is deferred to the ensure block below.
    reply.append self.container.remove_alias(self, self.gear, server_alias)
  rescue Exception => e
    Rails.logger.debug e.message
    Rails.logger.debug e.backtrace.inspect
    raise
  ensure
    if self.aliases.include? server_alias
      self.aliases.delete(server_alias)
      self.save
    else
      # NOTE(review): raising inside ensure replaces any exception already in
      # flight from the node call — presumably intentional, but verify.
      raise OpenShift::UserException.new("Alias '#{server_alias}' does not exist for '#{@name}'", 255, reply)
    end
  end
  reply
end
|
1179
|
+
|
1180
|
+
# Embeds a cartridge into the application: records the requirement, then
# reconfigures dependencies and re-executes connections. On failure the
# requirement is removed and the descriptor re-elaborated before re-raising.
#
# @param [String] dep cartridge name to embed
# @return [ResultIO] aggregated output from configuration
# @raise [OpenShift::UserException] 136 if already embedded, 108 if not
#   allowed in a scalable app
def add_dependency(dep)
  reply = ResultIO.new
  self.class.notify_observers(:before_add_dependency, {:application => self, :dependency => dep, :reply => reply})
  # Create persistent storage app entry on configure (one of the first things)
  Rails.logger.debug "DEBUG: Adding embedded app info from persistent storage: #{@name}:#{dep}"
  self.cart_data = {} if @cart_data.nil?

  raise OpenShift::UserException.new("#{dep} already embedded in '#{@name}'", 136) if self.embedded.include? dep
  if self.scalable
    # Scalable apps only accept the whitelisted embedded cartridges that are
    # actually installed on this broker.
    allowed_cartridges = SCALABLE_EMBEDDED_CARTS & Application.get_available_cartridges.sort
    raise OpenShift::UserException.new("#{dep} cannot be embedded in scalable app '#{@name}'. Allowed cartridges: #{allowed_cartridges.join(', ')}", 108) if not SCALABLE_EMBEDDED_CARTS.include? dep
  end
  add_to_requires_feature(dep)
  begin
    reply.append self.configure_dependencies
    self.execute_connections
  rescue Exception => e
    # Roll back the requirement and rebuild the runtime descriptor so the
    # model matches what is actually deployed.
    remove_from_requires_feature(dep)
    self.elaborate_descriptor
    self.save
    raise e
  end

  self.class.notify_observers(:after_add_dependency, {:application => self, :dependency => dep, :reply => reply})
  reply
end
|
1206
|
+
|
1207
|
+
# Removes an embedded cartridge: drops the requirement, re-elaborates the
# descriptor, and deconfigures/removes the component from every gear that no
# longer needs it.
#
# @param [String] dep cartridge name to remove
# @return [ResultIO] aggregated node output
# @raise [OpenShift::UserException] 135 if not embedded, 137 if it is the
#   required proxy cartridge of a scalable app
def remove_dependency(dep)
  reply = ResultIO.new
  self.class.notify_observers(:before_remove_dependency, {:application => self, :dependency => dep, :reply => reply})
  self.embedded = {} unless self.embedded

  raise OpenShift::UserException.new("#{dep} not embedded in '#{@name}', try adding it first", 135) unless self.embedded.include? dep
  raise OpenShift::UserException.new("#{dep} is not allowed to be removed from '#{@name}'. It is a required dependency for a scalable application.", 137) if (self.scalable and self.proxy_cartridge==dep)
  remove_from_requires_feature(dep)
  # elaborate_descriptor yields the component instances that the re-elaboration
  # made obsolete; they are torn down inside the block before being pruned.
  elaborate_descriptor { |removed_component_instances|
    #remove unused components
    removed_component_instances.each do |comp_inst_name|
      comp_inst = self.comp_instance_map[comp_inst_name]
      next if comp_inst.parent_cart_name == self.name
      group_inst = self.group_instance_map[comp_inst.group_instance_name]
      # NOTE(review): `s` (successes) is never used; only failures `f` are.
      s,f = run_on_gears(group_inst.gears, reply, false) do |gear, r|
        # Skip deconfigure when this component is the gear's only one — the
        # whole gear is removed in the pass below instead.
        unless gear.configured_components.length == 1 && gear.configured_components.first == comp_inst.name
          reply.append gear.deconfigure(comp_inst)
          process_cartridge_commands(r)
        end
      end

      f.each do |failed_data|
        Rails.logger.debug("Failed to deconfigure cartridge #{comp_inst.parent_cart_name} on gear #{failed_data[:gear].server_identity}:#{failed_data[:gear].uuid}")
        Rails.logger.debug("Exception #{failed_data[:exception].message}")
        Rails.logger.debug("#{failed_data[:exception].backtrace.inspect}")
      end

      # Remove gears left empty (or holding only this component).
      # NOTE(review): this call's success/failure results are discarded, and
      # the `f.length` check below still refers to the FIRST run's failures —
      # confirm that is intended.
      run_on_gears(group_inst.gears, reply, false) do |gear, r|
        if gear.configured_components.empty? || (gear.configured_components.length == 1 && gear.configured_components.first == comp_inst.name)
          reply.append group_inst.remove_gear(gear)
        end
      end

      if f.length > 0
        raise Exception.new("Failed to remove #{dep} from application #{self.name}. Try again or report to OpenShift Support.")
      end
    end
  }
  self.save
  self.class.notify_observers(:after_remove_dependency, {:application => self, :dependency => dep, :reply => reply})
  reply
end
|
1249
|
+
|
1250
|
+
# Fetches the public IP address of the node hosting the application's first
# gear via its container proxy.
#
# @return [String, nil] the address, or nil if the lookup raised
def get_public_ip_address
  self.container.get_public_ip_address
rescue Exception => e
  Rails.logger.debug e.backtrace.inspect
  nil
end
|
1258
|
+
|
1259
|
+
# Returns the first Gear object on which the application is running.
# For scalable apps this is the gear of the proxy-cartridge group when one
# can be found by name.
# @return [Gear, nil]
# @deprecated
def gear
  elaborate_descriptor if self.group_instances.nil?

  if scalable
    self.group_instance_map.each_key do |ginst_name|
      next unless ginst_name.include? self.proxy_cartridge
      return self.group_instance_map[ginst_name].gears.first
    end
  end

  first_ginst = self.group_instances.first
  first_ginst ? first_ginst.gears.first : nil
end
|
1278
|
+
|
1279
|
+
# Looks up the scaling bounds of a component's group instance.
#
# @param [String, nil] dependency component name; when nil, defaults to
#   "web" for scalable apps and to the framework cartridge otherwise
# @return [Array(Integer, Integer)] min and max gear counts
# @raise [OpenShift::NodeException] 135 when the component cannot be found
def scaling_limits(dependency=nil)
  if dependency.nil?
    dependency = self.scalable ? "web" : self.framework
  end
  profile = @profile_name_map[@default_profile]
  cinst = ComponentInstance::find_component_in_cart(profile, self, dependency, self.get_name_prefix)
  raise OpenShift::NodeException.new("Cannot find #{dependency} component in app #{self.name}.", 135, ResultIO.new) if cinst.nil?

  ginst = self.group_instance_map[cinst.group_instance_name]
  return ginst.min, ginst.max
end
|
1294
|
+
|
1295
|
+
# Get the ApplicationContainerProxy object for the first gear the application
# is running on.
# @return [ApplicationContainerProxy, nil]
# @deprecated
def container
  # Compute the first-gear lookup once: the original body called self.gear
  # twice, and `gear` may trigger a full elaborate_descriptor each time.
  first_gear = self.gear
  return nil if first_gear.nil?
  first_gear.get_proxy
end
|
1302
|
+
|
1303
|
+
# Name of the framework cartridge in use, with the trailing version segment
# stripped (e.g. a "name-version" cartridge id loses its last '-' part).
# @return [String, nil]
# @deprecated
def framework_cartridge
  full_name = self.framework
  return nil if full_name.nil?
  full_name.split('-')[0..-2].join('-')
end
|
1311
|
+
|
1312
|
+
# Full name of the framework (standalone) cartridge in use by the application.
# @return [String, nil] the first component's parent cartridge that is a
#   standalone cartridge, or nil when none match
# @deprecated
def framework
  standalone_carts = CartridgeCache.cartridge_names('standalone')
  self.comp_instance_map.each_value do |cinst|
    candidate = cinst.parent_cart_name
    return candidate if standalone_carts.include? candidate
  end
  nil
end
|
1323
|
+
|
1324
|
+
# Map of embedded-cartridge name to its properties for this application.
# When a component has cached cart_data, it is exposed under the "info" key
# merged into the properties.
# @return [Hash{String => Hash}]
# @deprecated
def embedded
  embedded_carts = CartridgeCache.cartridge_names('embedded')
  self.comp_instance_map.values.each_with_object({}) do |comp_inst, result|
    next unless embedded_carts.include?(comp_inst.parent_cart_name)
    first_data = comp_inst.cart_data.first
    result[comp_inst.parent_cart_name] =
      if first_data.nil?
        comp_inst.cart_properties
      else
        comp_inst.cart_properties.merge({"info" => first_data})
      end
  end
end
|
1341
|
+
|
1342
|
+
# Replaces the cached cart_data of every component instance that belongs to
# the given cartridge.
# @param [String] cart_name parent cartridge whose components to update
# @param info new info payload (stored as a one-element array)
# @deprecated
def set_embedded_cart_info(cart_name, info)
  self.comp_instance_map.each_value do |comp_inst|
    comp_inst.cart_data = [info] if cart_name == comp_inst.parent_cart_name
  end
end
|
1349
|
+
|
1350
|
+
# Provides an array version of the component instance map for saving in the
# datastore, initializing the map lazily.
# @return [Array<ComponentInstance>]
def comp_instances
  @comp_instance_map ||= {}
  @comp_instance_map.values
end
|
1356
|
+
|
1357
|
+
# Rebuilds the component instance map from an array of ComponentInstance
# objects or attribute hashes (as loaded from the datastore).
# @param [Array<Hash,ComponentInstance>] data
def comp_instances=(data)
  comp_instance_map_will_change!
  @comp_instance_map ||= {}
  data.each do |value|
    if value.class == ComponentInstance
      @comp_instance_map[value.name] = value
    else
      # Hash form: hydrate a fresh instance from its attributes.
      hydrated = ComponentInstance.new
      hydrated.attributes = value
      @comp_instance_map[value["name"]] = hydrated
    end
  end
end
|
1372
|
+
|
1373
|
+
# Provides an array version of the group instance map for saving in the
# datastore. Several map keys can share one GroupInstance; before returning,
# each unique instance's reused_by is refreshed with every key pointing at it.
# @return [Array<GroupInstance>]
def group_instances
  @group_instance_map ||= {}
  unique_insts = @group_instance_map.values.uniq
  all_keys = @group_instance_map.keys

  unique_insts.each do |ginst|
    ginst.reused_by = all_keys.select { |k| @group_instance_map[k] == ginst }
  end

  unique_insts
end
|
1386
|
+
|
1387
|
+
# Rebuilds the group instance map from an array of GroupInstance objects or
# attribute hashes; every name in an instance's reused_by maps back to the
# same shared instance.
# @param [Array<Hash,GroupInstance>] data
def group_instances=(data)
  group_instance_map_will_change!
  @group_instance_map ||= {}
  data.each do |value|
    shared =
      if value.class == GroupInstance
        value
      else
        # Hash form: hydrate a fresh instance from its attributes.
        hydrated = GroupInstance.new(self)
        hydrated.attributes = value
        hydrated
      end
    shared.reused_by.each { |k| @group_instance_map[k] = shared }
  end
end
|
1406
|
+
|
1407
|
+
# Namespace prefix used when composing component/group instance paths for
# this application.
# @return [String]
def get_name_prefix
  "@@app"
end
|
1410
|
+
|
1411
|
+
# Records a group-override pair on the default profile, creating the
# override list on first use.
# @param [String] from source component/group path
# @param [String] to target component/group path
def add_group_override(from, to)
  profile = @profile_name_map[@default_profile]
  profile.group_overrides = [] if profile.group_overrides.nil?
  profile.group_overrides.push([from, to])
end
|
1416
|
+
|
1417
|
+
# Parse the descriptor and build or update the runtime descriptor structure.
# Rebuilds working_{comp,group}_inst_hash from the default profile, resolves
# connections and co-locations, computes configure/start order, then prunes
# map entries that the re-elaboration no longer produces. Yields the list of
# deleted component-instance names (before pruning) when a block is given.
def elaborate_descriptor
  self.group_instance_map = {} if group_instance_map.nil?
  self.comp_instance_map = {} if comp_instance_map.nil?
  self.working_comp_inst_hash = {}
  self.working_group_inst_hash = {}
  self.group_override_map = {}
  self.conn_endpoints_list = []
  default_profile = @profile_name_map[@default_profile]

  default_profile.groups.each { |g|
    #gpath = self.name + "." + g.name
    gpath = self.get_name_prefix + g.get_name_prefix
    # Reuse an existing GroupInstance for this path if one exists (either
    # from this elaboration pass or a previous one); otherwise create it.
    gi = working_group_inst_hash[gpath]
    if gi.nil?
      gi = self.group_instance_map[gpath]
      if gi.nil?
        gi = GroupInstance.new(self, self.name, self.default_profile, g.name, gpath)
      else
        gi.merge(self.name, self.default_profile, g.name, gpath)
      end
    else
      gi.merge(self.name, self.default_profile, g.name, gpath)
    end
    self.group_instance_map[gpath] = gi
    self.working_group_inst_hash[gpath] = gi
    gi.elaborate(default_profile, g, self.get_name_prefix, self)
  }

  # make connection_endpoints out of provided connections
  default_profile.connections.each { |conn|
    inst1 = ComponentInstance::find_component_in_cart(default_profile, self, conn.components[0], self.get_name_prefix)
    inst2 = ComponentInstance::find_component_in_cart(default_profile, self, conn.components[1], self.get_name_prefix)
    ComponentInstance::establish_connections(inst1, inst2, self)
  }
  # check self.comp_instance_map for component instances
  # check self.group_instance_map for group instances
  # check self.conn_endpoints_list for list of connection endpoints (fully resolved)

  # resolve group co-locations
  colocate_groups

  # get configure_order and start_order
  get_exec_order(default_profile)

  # Components present in the old map but not regenerated this pass are
  # considered deleted.
  deleted_components_list = []
  self.comp_instance_map.each { |k,v| deleted_components_list << k if self.working_comp_inst_hash[k].nil? }

  yield deleted_components_list if block_given?

  # delete entries in {group,comp}_instance_map that do
  # not exist in working_{group,comp}_inst_hash
  self.group_instance_map.delete_if { |k,v|
    # Also detach a deleted component from any group that still lists it.
    v.component_instances.delete(k) if self.working_comp_inst_hash[k].nil? and v.component_instances.include?(k)
    self.working_group_inst_hash[k].nil?
  }
  self.comp_instance_map.delete_if { |k,v| self.working_comp_inst_hash[k].nil? }
end
|
1475
|
+
|
1476
|
+
# Get path for checking application health; chosen by the framework
# cartridge family ('php' and 'zend' share the PHP probe).
# @return [String]
def health_check_path
  case self.framework_cartridge
  when 'php', 'zend'
    'health_check.php'
  when 'perl'
    'health_check.pl'
  else
    'health'
  end
end
|
1490
|
+
|
1491
|
+
# Executes the deferred cartridge commands accumulated in a ResultIO: SSH key
# and environment-variable add/remove plus broker-auth key management. User-
# scoped commands run immediately; app-scoped ones are batched and pushed to
# every gear in parallel. Clears the command list when done.
#
# @param [ResultIO] result carrier of cart_commands emitted by the node
# @raise [OpenShift::NodeException] 143 when any parallel gear job fails
def process_cartridge_commands(result)
  commands = result.cart_commands
  self.ssh_keys = {} unless self.ssh_keys
  # App-scoped work deferred until after the per-command loop.
  app_jobs = { 'add_ssh_keys' => [], 'remove_ssh_keys' => [], 'remove_env_vars' => [] }
  commands.each do |command_item|
    case command_item[:command]
    when "SYSTEM_SSH_KEY_ADD"
      key = command_item[:args][0]
      self.user.add_system_ssh_key(self.name, key)
    when "SYSTEM_SSH_KEY_REMOVE"
      self.user.remove_system_ssh_key(self.name)
    when "APP_SSH_KEY_ADD"
      key_name = command_item[:args][0]
      key = command_item[:args][1]
      self.ssh_keys[key_name] = key
      app_jobs['add_ssh_keys'] << [key_name,key]
    when "APP_SSH_KEY_REMOVE"
      key_name = command_item[:args][0]
      key = self.ssh_keys.delete(key_name)
      app_jobs['remove_ssh_keys'] << key unless key.nil?
    when "ENV_VAR_ADD"
      key = command_item[:args][0]
      value = command_item[:args][1]
      self.user.add_env_var(key,value)
    when "ENV_VAR_REMOVE"
      key = command_item[:args][0]
      self.user.remove_env_var(key)
    when "APP_ENV_VAR_REMOVE"
      key = command_item[:args][0]
      app_jobs['remove_env_vars'] << key unless key.nil?
    when "BROKER_KEY_ADD"
      iv, token = OpenShift::AuthService.instance.generate_broker_key(self)
      self.user.add_save_job('adds', 'broker_auth_keys', [self.uuid, iv, token])
    when "BROKER_KEY_REMOVE"
      self.user.add_save_job('removes', 'broker_auth_keys', [self.uuid])
    end
  end
  # Persist user-level changes queued above before touching the gears.
  if user.save_jobs
    user.save
  end
  handle = RemoteJob.create_parallel_job
  tag = ""
  RemoteJob.run_parallel_on_gears(self.gears, handle) { |exec_handle, gear|
    app_jobs.each do |action,value|
      case action
      when "remove_env_vars"
        value.each { |key|
          job = gear.env_var_job_remove(key)
          RemoteJob.add_parallel_job(exec_handle, tag, gear, job)
        }
      when "add_ssh_keys"
        value.each { |key_info|
          key_name,key = key_info
          job = gear.ssh_key_job_add(key, nil, key_name)
          RemoteJob.add_parallel_job(exec_handle, tag, gear, job)
        }
      when "remove_ssh_keys"
        value.each { |key|
          job = gear.ssh_key_job_remove(key, nil)
          RemoteJob.add_parallel_job(exec_handle, tag, gear, job)
        }
      end
    end
  }
  # NOTE(review): the block parameter |tag, ...| shadows the outer `tag`
  # local above — harmless here, but consider renaming.
  RemoteJob.get_parallel_run_results(handle) { |tag, gear, output, status|
    if status != 0
      raise OpenShift::NodeException.new("Error updating settings on gear: #{gear} with status: #{status} and output: #{output}", 143)
    end
  }
  commands.clear
end
|
1562
|
+
|
1563
|
+
# Records a usage event for billing/metering, to the datastore and/or syslog
# depending on configuration. Syslog failures are logged and swallowed so
# secondary logging cannot fail the primary operation.
#
# @param [Gear] gear the gear the event applies to
# @param event usage event (e.g. begin/end)
# @param usage_type one of UsageRecord::USAGE_TYPES; defaults to gear usage
def track_usage(gear, event, usage_type=UsageRecord::USAGE_TYPES[:gear_usage])
  if Rails.configuration.usage_tracking[:datastore_enabled]
    now = Time.now.utc
    uuid = OpenShift::Model.gen_uuid
    self.usage_records = [] unless usage_records
    usage_record = UsageRecord.new(event, user, now, uuid, usage_type)
    case usage_type
    when UsageRecord::USAGE_TYPES[:gear_usage]
      usage_record.gear_uuid = gear.uuid
      usage_record.gear_size = gear.node_profile
    when UsageRecord::USAGE_TYPES[:addtl_fs_gb]
      usage_record.gear_uuid = gear.uuid
      usage_record.addtl_fs_gb = gear.group_instance.addtl_fs_gb
    end
    self.usage_records << usage_record

    self.class.notify_observers(:track_usage, {:gear_uuid => gear.uuid, :login => gear.app.user.login, :event => event, :time => now, :uuid => uuid, :usage_type => usage_type, :gear_size => gear.node_profile, :addtl_fs_gb => gear.group_instance.addtl_fs_gb})
  end
  if Rails.configuration.usage_tracking[:syslog_enabled]
    usage_string = "User: #{user.login} Event: #{event}"
    case usage_type
    when UsageRecord::USAGE_TYPES[:gear_usage]
      usage_string += " Gear: #{gear.uuid} Gear Size: #{gear.node_profile}"
    when UsageRecord::USAGE_TYPES[:addtl_fs_gb]
      usage_string += " Gear: #{gear.uuid} Addtl File System GB: #{gear.group_instance.addtl_fs_gb}"
    end
    begin
      Syslog.open('openshift_usage', Syslog::LOG_PID) { |s| s.notice usage_string }
    rescue Exception => e
      # Can't fail because of a secondary logging error
      Rails.logger.error e.message
      Rails.logger.error e.backtrace
    end
  end
end
|
1598
|
+
|
1599
|
+
private
|
1600
|
+
|
1601
|
+
# Computes self.configure_order (and start_order) for the default profile:
# the profile's declared configure_order first, then every group's component
# refs, each expanded through its dependency chain, de-duplicated.
#
# @param default_profile the profile whose groups/order to use
def get_exec_order(default_profile)
  self.configure_order = []
  default_profile.configure_order.each { |raw_c_name|
    cinst = ComponentInstance::find_component_in_cart(default_profile, self, raw_c_name, self.get_name_prefix)
    next if cinst.nil?
    # Dependencies are appended before the component itself.
    ComponentInstance::collect_exec_order(self, cinst, self.configure_order)
    self.configure_order << cinst.name if not self.configure_order.include? cinst.name
  }
  default_profile.groups.each { |g|
    g.component_refs.each { |cr|
      cpath = self.get_name_prefix + cr.get_name_prefix(default_profile)
      cinst = self.comp_instance_map[cpath]
      ComponentInstance::collect_exec_order(self, cinst, self.configure_order)
      self.configure_order << cpath if not self.configure_order.include? cpath
    }
  }
  # NOTE(review): start_order aliases the SAME array object as
  # configure_order (no dup) — later mutation of one affects both; confirm.
  self.start_order = self.configure_order
end
|
1619
|
+
|
1620
|
+
# Merges group instances that must share a gear: any pair joined by a
# FILESYSTEM or AFUNIX connection endpoint is collapsed into one instance,
# then profile-declared overrides and top-group auto-merging are applied.
def colocate_groups
  default_profile = @profile_name_map[@default_profile]
  self.conn_endpoints_list.each { |conn|
    # Filesystem / UNIX-socket connectors only work on the same host.
    if conn.from_connector.type.match(/^FILESYSTEM/) or conn.from_connector.type.match(/^AFUNIX/)
      cinst1 = self.comp_instance_map[conn.from_comp_inst]
      ginst1 = self.group_instance_map[cinst1.group_instance_name]
      cinst2 = self.comp_instance_map[conn.to_comp_inst]
      ginst2 = self.group_instance_map[cinst2.group_instance_name]
      next if ginst1==ginst2
      # these two group instances need to be colocated
      ginst1.merge_inst(ginst2)
      self.group_instance_map[cinst2.group_instance_name] = ginst1
    end
  }
  generate_group_overrides(default_profile)
  auto_merge_top_groups(default_profile)
end
|
1637
|
+
|
1638
|
+
# Populates group_override_map (bidirectionally) from the profile's declared
# group_overrides. Each override entry is a list whose LAST element is the
# merge target; every other element maps to/from it.
#
# @param default_profile profile whose group_overrides to process
def generate_group_overrides(default_profile)
  default_profile.group_overrides.each do |go|
    # Shallow copy so popping the target does not mutate the profile's data.
    go_copy = go.dup
    n = go_copy.pop
    go_copy.each { |v|
      from_cinst = ComponentInstance::find_component_in_cart(default_profile, self, v, self.get_name_prefix)
      to_cinst = ComponentInstance::find_component_in_cart(default_profile, self, n, self.get_name_prefix)
      next if from_cinst.nil? or to_cinst.nil?
      from_gpath = from_cinst.group_instance_name
      to_gpath = to_cinst.group_instance_name
      # Record the pairing in both directions.
      group_override_map[from_gpath] = to_gpath
      group_override_map[to_gpath] = from_gpath
    }
  end
end
|
1653
|
+
|
1654
|
+
# Applies group merging after overrides: for scalable apps, merges every
# group pair recorded in group_override_map; for non-scalable apps, folds the
# groups of all dependencies of the first (top) group's components into that
# top group so everything lands on one gear.
#
# @param default_profile profile whose groups to merge
def auto_merge_top_groups(default_profile)
  if self.scalable
    # Snapshot keys — the map is mutated while iterating.
    group_name_list = self.group_instance_map.keys.dup
    group_name_list.each { |gname|
      mapped_to = group_override_map[gname]
      next if mapped_to.nil?
      ginst1 = self.group_instance_map[gname]
      ginst2 = self.group_instance_map[mapped_to]
      next if ginst1==ginst2
      ginst1.merge_inst(ginst2)
      self.group_instance_map[mapped_to] = ginst1
    }
  else
    first_group = default_profile.groups[0]
    gpath = self.get_name_prefix + first_group.get_name_prefix
    gi = self.group_instance_map[gpath]
    first_group.component_refs.each { |comp_ref|
      cpath = self.get_name_prefix + comp_ref.get_name_prefix(default_profile)
      ci = self.comp_instance_map[cpath]
      ci.dependencies.each { |cdep|
        cdepinst = self.comp_instance_map[cdep]
        ginst = self.group_instance_map[cdepinst.group_instance_name]
        next if ginst==gi
        Rails.logger.debug "Auto-merging group #{ginst.name} into #{gi.name}"
        # merge ginst into gi
        #gi.merge(ginst.cart_name, ginst.profile_name, ginst.group_name, ginst.name, ginst.component_instances)
        gi.merge_inst(ginst)
        self.group_instance_map[cdepinst.group_instance_name] = gi
      }
    }
  end
end
|
1686
|
+
|
1687
|
+
|
1688
|
+
# Runs the provided block on a set of containers.
# @param [Array<Gear>] gears containers to run the block on; nil means all of
#   the application's gears
# @param [ResultIO, nil] result_io accumulator passed to the block; also
#   receives the resultIO of any OOException raised by the block
# @param [Boolean] fail_fast stop immediately (raising GearsException) on the
#   first exception instead of continuing with the remaining gears
# @param [Block] block invoked as block.call(gear, result_io)
# @return [Array(Array, Array)] successful runs ({:gear, :return}) and failed
#   runs ({:gear, :exception})
def run_on_gears(gears=nil, result_io = nil, fail_fast=true, &block)
  successful_runs = []
  failed_runs = []
  gears = self.gears if gears.nil?

  # dup: the block may remove gears from the underlying collection.
  gears.dup.each do |gear|
    begin
      retval = block.call(gear, result_io)
      successful_runs.push({:gear => gear, :return => retval})
    rescue Exception => e
      Rails.logger.error e.message
      Rails.logger.error e.inspect
      Rails.logger.error e.backtrace.inspect
      failed_runs.push({:gear => gear, :exception => e})
      # Capture any node output attached to OpenShift exceptions.
      if (!result_io.nil? && e.kind_of?(OpenShift::OOException) && !e.resultIO.nil?)
        result_io.append(e.resultIO)
      end
      if fail_fast
        raise OpenShift::GearsException.new(successful_runs, failed_runs, e)
      end
    end
  end

  return successful_runs, failed_runs
end
|
1718
|
+
end
|