jtzemp-poolparty 0.1.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/CHANGELOG +23 -0
- data/LICENSE +22 -0
- data/README +139 -0
- data/Rakefile +109 -0
- data/assets/clouds.png +0 -0
- data/bin/instance +68 -0
- data/bin/pool +83 -0
- data/bin/poolnotify +34 -0
- data/config/cloud_master_takeover +17 -0
- data/config/create_proxy_ami.sh +582 -0
- data/config/haproxy.conf +29 -0
- data/config/heartbeat.conf +8 -0
- data/config/heartbeat_authkeys.conf +2 -0
- data/config/installers/ubuntu_install.sh +77 -0
- data/config/monit.conf +9 -0
- data/config/monit/haproxy.monit.conf +8 -0
- data/config/monit/nginx.monit.conf +0 -0
- data/config/nginx.conf +24 -0
- data/config/reconfigure_instances_script.sh +37 -0
- data/config/sample-config.yml +23 -0
- data/config/scp_instances_script.sh +12 -0
- data/lib/core/array.rb +16 -0
- data/lib/core/exception.rb +9 -0
- data/lib/core/float.rb +13 -0
- data/lib/core/hash.rb +11 -0
- data/lib/core/kernel.rb +12 -0
- data/lib/core/module.rb +22 -0
- data/lib/core/object.rb +21 -0
- data/lib/core/proc.rb +15 -0
- data/lib/core/string.rb +56 -0
- data/lib/core/time.rb +41 -0
- data/lib/helpers/plugin_spec_helper.rb +58 -0
- data/lib/modules/callback.rb +133 -0
- data/lib/modules/ec2_wrapper.rb +108 -0
- data/lib/modules/file_writer.rb +38 -0
- data/lib/modules/safe_instance.rb +31 -0
- data/lib/modules/sprinkle_overrides.rb +27 -0
- data/lib/modules/vlad_override.rb +83 -0
- data/lib/poolparty.rb +131 -0
- data/lib/poolparty/application.rb +199 -0
- data/lib/poolparty/init.rb +6 -0
- data/lib/poolparty/master.rb +492 -0
- data/lib/poolparty/monitors.rb +11 -0
- data/lib/poolparty/monitors/cpu.rb +23 -0
- data/lib/poolparty/monitors/memory.rb +33 -0
- data/lib/poolparty/monitors/web.rb +29 -0
- data/lib/poolparty/optioner.rb +20 -0
- data/lib/poolparty/plugin.rb +78 -0
- data/lib/poolparty/provider.rb +104 -0
- data/lib/poolparty/provider/essential.rb +6 -0
- data/lib/poolparty/provider/git.rb +8 -0
- data/lib/poolparty/provider/haproxy.rb +9 -0
- data/lib/poolparty/provider/heartbeat.rb +6 -0
- data/lib/poolparty/provider/rsync.rb +8 -0
- data/lib/poolparty/provider/ruby.rb +65 -0
- data/lib/poolparty/provider/s3fuse.rb +22 -0
- data/lib/poolparty/remote_instance.rb +250 -0
- data/lib/poolparty/remoter.rb +171 -0
- data/lib/poolparty/remoting.rb +137 -0
- data/lib/poolparty/scheduler.rb +93 -0
- data/lib/poolparty/tasks.rb +47 -0
- data/lib/poolparty/tasks/cloud.rake +57 -0
- data/lib/poolparty/tasks/development.rake +78 -0
- data/lib/poolparty/tasks/ec2.rake +20 -0
- data/lib/poolparty/tasks/instance.rake +63 -0
- data/lib/poolparty/tasks/plugins.rake +30 -0
- data/lib/poolparty/tasks/server.rake +42 -0
- data/lib/poolparty/thread_pool.rb +94 -0
- data/lib/s3/s3_object_store_folders.rb +44 -0
- data/poolparty.gemspec +71 -0
- data/spec/files/describe_response +37 -0
- data/spec/files/multi_describe_response +69 -0
- data/spec/files/remote_desc_response +37 -0
- data/spec/helpers/ec2_mock.rb +57 -0
- data/spec/lib/core/core_spec.rb +26 -0
- data/spec/lib/core/kernel_spec.rb +24 -0
- data/spec/lib/core/string_spec.rb +28 -0
- data/spec/lib/modules/callback_spec.rb +213 -0
- data/spec/lib/modules/file_writer_spec.rb +74 -0
- data/spec/lib/poolparty/application_spec.rb +135 -0
- data/spec/lib/poolparty/ec2_wrapper_spec.rb +110 -0
- data/spec/lib/poolparty/master_spec.rb +479 -0
- data/spec/lib/poolparty/optioner_spec.rb +34 -0
- data/spec/lib/poolparty/plugin_spec.rb +115 -0
- data/spec/lib/poolparty/poolparty_spec.rb +60 -0
- data/spec/lib/poolparty/provider_spec.rb +74 -0
- data/spec/lib/poolparty/remote_instance_spec.rb +178 -0
- data/spec/lib/poolparty/remoter_spec.rb +72 -0
- data/spec/lib/poolparty/remoting_spec.rb +148 -0
- data/spec/lib/poolparty/scheduler_spec.rb +70 -0
- data/spec/monitors/cpu_monitor_spec.rb +39 -0
- data/spec/monitors/memory_spec.rb +51 -0
- data/spec/monitors/misc_monitor_spec.rb +51 -0
- data/spec/monitors/web_spec.rb +40 -0
- data/spec/spec_helper.rb +53 -0
- metadata +312 -0
data/lib/poolparty.rb
ADDED
|
@@ -0,0 +1,131 @@
|
|
|
1
|
+
=begin rdoc
|
|
2
|
+
The main file, contains the client and the server application methods
|
|
3
|
+
=end
|
|
4
|
+
$:.unshift File.dirname(__FILE__) # For use/testing when no gem is installed
|
|
5
|
+
|
|
6
|
+
$TRACE = true
|
|
7
|
+
|
|
8
|
+
# rubygems
|
|
9
|
+
require 'rubygems'
|
|
10
|
+
require "aws/s3"
|
|
11
|
+
require "EC2"
|
|
12
|
+
require "aska"
|
|
13
|
+
begin
|
|
14
|
+
require 'crafterm-sprinkle'
|
|
15
|
+
rescue LoadError
|
|
16
|
+
require 'sprinkle'
|
|
17
|
+
end
|
|
18
|
+
require "pp"
|
|
19
|
+
require "tempfile"
|
|
20
|
+
|
|
21
|
+
begin
|
|
22
|
+
require 'fastthread'
|
|
23
|
+
require 'system_timer'
|
|
24
|
+
@@timer = SystemTimer
|
|
25
|
+
rescue LoadError
|
|
26
|
+
require 'thread'
|
|
27
|
+
require 'timeout'
|
|
28
|
+
@@timer = Timeout
|
|
29
|
+
end
|
|
30
|
+
|
|
31
|
+
## Load PoolParty
|
|
32
|
+
pwd = File.dirname(__FILE__)
|
|
33
|
+
|
|
34
|
+
# Load the required files
|
|
35
|
+
# If there is an init file, load that, otherwise
|
|
36
|
+
# require all the files in each directory
|
|
37
|
+
%w(core modules s3 helpers poolparty).each do |dir|
|
|
38
|
+
Dir["#{pwd}/#{dir}"].each do |dir|
|
|
39
|
+
begin
|
|
40
|
+
require File.join(dir, "init")
|
|
41
|
+
rescue LoadError => e
|
|
42
|
+
Dir["#{pwd}/#{File.basename(dir)}/**"].each {|file| require File.join(dir, File.basename(file))}
|
|
43
|
+
end
|
|
44
|
+
end
|
|
45
|
+
end
|
|
46
|
+
|
|
47
|
+
module PoolParty
|
|
48
|
+
class Version #:nodoc:
|
|
49
|
+
@major = 0
|
|
50
|
+
@minor = 1
|
|
51
|
+
@tiny = 2
|
|
52
|
+
|
|
53
|
+
def self.string
|
|
54
|
+
[@major, @minor, @tiny].join('.')
|
|
55
|
+
end
|
|
56
|
+
end
|
|
57
|
+
def timer
|
|
58
|
+
@@timer
|
|
59
|
+
end
|
|
60
|
+
# PoolParty options
|
|
61
|
+
def options(opts={})
|
|
62
|
+
Application.options(opts)
|
|
63
|
+
end
|
|
64
|
+
# Are we working in verbose-mode
|
|
65
|
+
def verbose?
|
|
66
|
+
options.verbose == true
|
|
67
|
+
end
|
|
68
|
+
# Send a message if we are in verbose-mode
|
|
69
|
+
def message(msg="")
|
|
70
|
+
puts "-- #{msg}" if verbose?
|
|
71
|
+
end
|
|
72
|
+
# Root directory of the application
|
|
73
|
+
def root_dir
|
|
74
|
+
File.expand_path(File.dirname(__FILE__) + "/..")
|
|
75
|
+
end
|
|
76
|
+
# User directory
|
|
77
|
+
def user_dir
|
|
78
|
+
Application.working_directory
|
|
79
|
+
end
|
|
80
|
+
# Write string to a tempfile
|
|
81
|
+
def write_to_temp_file(str="")
|
|
82
|
+
tempfile = Tempfile.new("rand#{rand(1000)}-#{rand(1000)}")
|
|
83
|
+
tempfile.print(str)
|
|
84
|
+
tempfile.flush
|
|
85
|
+
tempfile
|
|
86
|
+
end
|
|
87
|
+
def register_monitor(*names)
|
|
88
|
+
names.each do |name|
|
|
89
|
+
unless registered_monitor?(name)
|
|
90
|
+
PoolParty::Monitors.extend name
|
|
91
|
+
|
|
92
|
+
PoolParty::Master.send :include, name::Master
|
|
93
|
+
PoolParty::RemoteInstance.send :include, name::Remote
|
|
94
|
+
|
|
95
|
+
registered_monitors << name
|
|
96
|
+
end
|
|
97
|
+
end
|
|
98
|
+
end
|
|
99
|
+
def registered_monitor?(name); registered_monitors.include?(name); end
|
|
100
|
+
def registered_monitors; @@registered_monitors ||= [];end
|
|
101
|
+
|
|
102
|
+
def load_app
|
|
103
|
+
load_monitors
|
|
104
|
+
load_plugins
|
|
105
|
+
end
|
|
106
|
+
def load_monitors
|
|
107
|
+
loc = File.directory?("#{user_dir}/monitors") ? "#{user_dir}/monitors" : "#{root_dir}/lib/poolparty/monitors"
|
|
108
|
+
Dir["#{loc}/*"].each {|f| require f}
|
|
109
|
+
end
|
|
110
|
+
|
|
111
|
+
def load_plugins
|
|
112
|
+
Dir["#{plugin_dir}/**/init.rb"].each {|a| require a} if File.directory?(plugin_dir)
|
|
113
|
+
end
|
|
114
|
+
def reset!
|
|
115
|
+
@@registered_monitors = nil
|
|
116
|
+
@@installed_plugins = nil
|
|
117
|
+
end
|
|
118
|
+
def plugin_dir
|
|
119
|
+
"#{user_dir}/plugins"
|
|
120
|
+
end
|
|
121
|
+
def read_config_file(filename)
|
|
122
|
+
return {} unless filename
|
|
123
|
+
YAML.load(open(filename).read)
|
|
124
|
+
end
|
|
125
|
+
def include_cloud_tasks
|
|
126
|
+
Tasks.new.define_tasks
|
|
127
|
+
end
|
|
128
|
+
|
|
129
|
+
alias_method :tasks, :include_cloud_tasks
|
|
130
|
+
alias_method :include_tasks, :include_cloud_tasks
|
|
131
|
+
end
|
|
@@ -0,0 +1,199 @@
|
|
|
1
|
+
=begin rdoc
|
|
2
|
+
Application
|
|
3
|
+
This handles user interaction
|
|
4
|
+
=end
|
|
5
|
+
module PoolParty
|
|
6
|
+
class Application
|
|
7
|
+
class << self
|
|
8
|
+
attr_accessor :verbose, :options
|
|
9
|
+
|
|
10
|
+
# The application options
|
|
11
|
+
def options(opts={})
|
|
12
|
+
@options ||= make_options(opts)
|
|
13
|
+
end
|
|
14
|
+
# Make the options with the config_file overrides included
|
|
15
|
+
# Default config file assumed to be at config/config.yml
|
|
16
|
+
def make_options(opts={})
|
|
17
|
+
loading_options = opts.delete(:optsparse) || {}
|
|
18
|
+
loading_options.merge!( opts || {})
|
|
19
|
+
|
|
20
|
+
load_options!(loading_options) # Load command-line options
|
|
21
|
+
config_file_location = (default_options[:config_file] || opts[:config_file])
|
|
22
|
+
|
|
23
|
+
# If the config_file options are specified and not empty
|
|
24
|
+
unless config_file_location.nil? || config_file_location.empty?
|
|
25
|
+
require "yaml"
|
|
26
|
+
# Try loading the file if it exists
|
|
27
|
+
filedata = File.open("#{config_file_location}").read if File.file?("#{config_file_location}")
|
|
28
|
+
default_options.merge!( YAML.load(filedata) ) if filedata rescue ""
|
|
29
|
+
end
|
|
30
|
+
# We want the command-line to overwrite the config file
|
|
31
|
+
default_options.merge!(local_user_data) unless local_user_data.nil?
|
|
32
|
+
OpenStruct.new(default_options)
|
|
33
|
+
end
|
|
34
|
+
|
|
35
|
+
# Load options via commandline
|
|
36
|
+
def load_options!(opts={})
|
|
37
|
+
require 'optparse'
|
|
38
|
+
OptionParser.new do |op|
|
|
39
|
+
op.banner = opts[:banner] if opts[:banner]
|
|
40
|
+
op.on('-A key', '--access-key key', "Ec2 access key (ENV['AWS_ACCESS_KEY'])") { |key| default_options[:access_key] = key }
|
|
41
|
+
op.on('-S key', '--secret-access-key key', "Ec2 secret access key (ENV['AWS_SECRET_ACCESS'])") { |key| default_options[:secret_access_key] = key }
|
|
42
|
+
op.on('-I ami', '--image-id id', "AMI instance (default: 'ami-40bc5829')") {|id| default_options[:ami] = id }
|
|
43
|
+
op.on('-k keypair', '--keypair name', "Keypair name (ENV['KEYPAIR_NAME'])") { |key| default_options[:keypair] = key }
|
|
44
|
+
op.on('-b bucket', '--bucket bucket', "Application bucket") { |bucket| default_options[:shared_bucket] = bucket }
|
|
45
|
+
# //THIS IS WHERE YOU LEFT OFF
|
|
46
|
+
op.on('-D working directory', '--dir dir', "Working directory") { |d| default_options[:working_directory] = d }
|
|
47
|
+
|
|
48
|
+
op.on('--ec2-dir dir', "Directory with ec2 data (default: '~/.ec2')") {|id| default_options[:ec2_dir] = id }
|
|
49
|
+
op.on('-r names', '--services names', "Monitored services (default: '')") {|id| default_options[:services] = id }
|
|
50
|
+
op.on('-c file', '--config-file file', "Config file (default: '')") {|file| default_options[:config_file] = file }
|
|
51
|
+
op.on('-l plugin_dir', '--plugin-dir dir', "Plugin directory (default: '')") {|file| default_options[:plugin_dir] = file }
|
|
52
|
+
op.on('-p port', '--host_port port', "Run on specific host_port (default: 7788)") { |host_port| default_options[:host_port] = host_port }
|
|
53
|
+
op.on('-m monitors', '--monitors names', "Monitor instances using (default: 'web,memory,cpu')") {|s| default_options[:monitor_load_on] = s }
|
|
54
|
+
op.on('-o port', '--client_port port', "Run on specific client_port (default: 7788)") { |client_port| default_options[:client_port] = client_port }
|
|
55
|
+
op.on('-O os', '--os os', "Configure for os (default: ubuntu)") { |os| default_options[:os] = os }
|
|
56
|
+
op.on('-e env', '--environment env', "Run on the specific environment (default: development)") { |env| default_options[:environment] = env }
|
|
57
|
+
op.on('-a address', '--public-ip address', "Associate this public address with the master node") {|s| default_options[:public_ip] = s}
|
|
58
|
+
op.on('-s size', '--size size', "Run specific sized instance") {|s| default_options[:size] = s}
|
|
59
|
+
op.on('-a name', '--name name', "Application name") {|n| default_options[:app_name] = n}
|
|
60
|
+
op.on('-u username', '--username name', "Login with the user (default: root)") {|s| default_options[:user] = s}
|
|
61
|
+
op.on('-d user-data','--user-data data', "Extra data to send each of the instances (default: "")") { |data| default_options[:user_data] = data.to_str }
|
|
62
|
+
op.on('-i', '--install-on-boot', 'Install the PoolParty and custom software on boot (default: false)') {|b| default_options[:install_on_load] = true}
|
|
63
|
+
op.on('-t seconds', '--polling-time', "Time between polling in seconds (default 50)") {|t| default_options[:polling_time] = t }
|
|
64
|
+
op.on('-v', '--[no-]verbose', 'Run verbosely (default: false)') {|v| default_options[:verbose] = true}
|
|
65
|
+
op.on('-n number', '--minimum-instances', "The minimum number of instances to run at all times (default 1)") {|i| default_options[:minimum_instances] = i.to_i}
|
|
66
|
+
op.on('-x number', '--maximum-instances', "The maximum number of instances to run (default 3)") {|x| default_options[:maximum_instances] = x.to_i}
|
|
67
|
+
|
|
68
|
+
op.on_tail("-V", "Show version") do
|
|
69
|
+
puts Application.version
|
|
70
|
+
exit
|
|
71
|
+
end
|
|
72
|
+
op.on_tail("-h", "-?", "Show this message") do
|
|
73
|
+
puts op
|
|
74
|
+
exit
|
|
75
|
+
end
|
|
76
|
+
end.parse!(opts[:argv] ? opts.delete(:argv) : ARGV.dup)
|
|
77
|
+
end
|
|
78
|
+
|
|
79
|
+
# Basic default options
|
|
80
|
+
# All can be overridden by the command line
|
|
81
|
+
# or in a config.yml file
|
|
82
|
+
def default_options
|
|
83
|
+
@default_options ||= {
|
|
84
|
+
:app_name => "application_name",
|
|
85
|
+
:host_port => 80,
|
|
86
|
+
:client_port => 8001,
|
|
87
|
+
:environment => 'development',
|
|
88
|
+
:verbose => false,
|
|
89
|
+
:logging => true,
|
|
90
|
+
:size => "m1.small",
|
|
91
|
+
:polling_time => "30.seconds",
|
|
92
|
+
:user_data => "",
|
|
93
|
+
:heavy_load => 0.80,
|
|
94
|
+
:light_load => 0.15,
|
|
95
|
+
:minimum_instances => 2,
|
|
96
|
+
:maximum_instances => 4,
|
|
97
|
+
:public_ip => "",
|
|
98
|
+
:access_key => ENV["AWS_ACCESS_KEY"],
|
|
99
|
+
:secret_access_key => ENV["AWS_SECRET_ACCESS"],
|
|
100
|
+
:config_file => if ENV["CONFIG_FILE"] && !ENV["CONFIG_FILE"].empty?
|
|
101
|
+
ENV["CONFIG_FILE"]
|
|
102
|
+
elsif File.file?("config/config.yml")
|
|
103
|
+
"config/config.yml"
|
|
104
|
+
else
|
|
105
|
+
nil
|
|
106
|
+
end,
|
|
107
|
+
:username => "root",
|
|
108
|
+
:ec2_dir => (ENV["EC2_HOME"].nil? || ENV["EC2_HOME"].empty?) ? "~/.ec2" : ENV["EC2_HOME"],
|
|
109
|
+
:keypair => (ENV["KEYPAIR_NAME"].nil? || ENV["KEYPAIR_NAME"].empty?) ? File.basename(`pwd`).strip : ENV["KEYPAIR_NAME"],
|
|
110
|
+
:ami => 'ami-44bd592d',
|
|
111
|
+
:shared_bucket => "",
|
|
112
|
+
:expand_when => "web < 1.5\n memory > 0.85",
|
|
113
|
+
:contract_when => "cpu < 0.20\n memory < 0.10",
|
|
114
|
+
:os => "ubuntu",
|
|
115
|
+
:plugin_dir => "plugins",
|
|
116
|
+
:install_on_load => false,
|
|
117
|
+
:working_directory => Dir.pwd
|
|
118
|
+
}
|
|
119
|
+
end
|
|
120
|
+
# Services monitored by Heartbeat
|
|
121
|
+
def master_managed_services
|
|
122
|
+
"cloud_master_takeover"
|
|
123
|
+
end
|
|
124
|
+
alias_method :managed_services, :master_managed_services
|
|
125
|
+
def launching_user_data
|
|
126
|
+
hash_to_launch_with.to_yaml
|
|
127
|
+
end
|
|
128
|
+
def hash_to_launch_with
|
|
129
|
+
@hash ||= { :polling_time => polling_time,
|
|
130
|
+
:access_key => access_key,
|
|
131
|
+
:secret_access_key => secret_access_key,
|
|
132
|
+
:user_data => user_data,
|
|
133
|
+
:keypair => keypair,
|
|
134
|
+
:keypair_path => "/mnt"
|
|
135
|
+
}
|
|
136
|
+
end
|
|
137
|
+
def local_user_data
|
|
138
|
+
@local_user_data ||= begin
|
|
139
|
+
@@timer.timeout(2.seconds) do
|
|
140
|
+
YAML.load(open("http://169.254.169.254/latest/user-data").read)
|
|
141
|
+
end
|
|
142
|
+
rescue Exception => e
|
|
143
|
+
{}
|
|
144
|
+
end
|
|
145
|
+
end
|
|
146
|
+
# For testing purposes
|
|
147
|
+
def reset!
|
|
148
|
+
@options = nil
|
|
149
|
+
@local_user_data = nil
|
|
150
|
+
end
|
|
151
|
+
# Keypair path
|
|
152
|
+
# Idiom:
|
|
153
|
+
# /Users/username/.ec2/[name]
|
|
154
|
+
def keypair_path
|
|
155
|
+
options.keypair_path ? options.keypair_path : "#{ec2_dir}/#{keypair_name}"
|
|
156
|
+
end
|
|
157
|
+
def keypair_name
|
|
158
|
+
"id_rsa-#{keypair}"
|
|
159
|
+
end
|
|
160
|
+
# Are we in development or test mode
|
|
161
|
+
%w(development production test).each do |env|
|
|
162
|
+
eval <<-EOE
|
|
163
|
+
def #{env}?
|
|
164
|
+
environment == '#{env}'
|
|
165
|
+
end
|
|
166
|
+
EOE
|
|
167
|
+
end
|
|
168
|
+
def environment=(env)
|
|
169
|
+
environment = env
|
|
170
|
+
end
|
|
171
|
+
def maintain_pid_path
|
|
172
|
+
"/var/run/pool_maintain.pid"
|
|
173
|
+
end
|
|
174
|
+
%w(scp_instances_script reconfigure_instances_script).each do |file|
|
|
175
|
+
define_method "sh_#{file}" do
|
|
176
|
+
File.join(File.dirname(__FILE__), "../..", "config", "#{file}.sh")
|
|
177
|
+
end
|
|
178
|
+
end
|
|
179
|
+
# Standard configuration files
|
|
180
|
+
%w(haproxy monit heartbeat heartbeat_authkeys).each do |file|
|
|
181
|
+
define_method "#{file}_config_file" do
|
|
182
|
+
File.join(File.dirname(__FILE__), "../..", "config", "#{file}.conf")
|
|
183
|
+
end
|
|
184
|
+
end
|
|
185
|
+
def version
|
|
186
|
+
PoolParty::Version.string
|
|
187
|
+
end
|
|
188
|
+
def install_on_load?(bool=false)
|
|
189
|
+
options.install_on_load == true || bool
|
|
190
|
+
end
|
|
191
|
+
# Call the options from the Application
|
|
192
|
+
def method_missing(m,*args)
|
|
193
|
+
options.methods.include?("#{m}") ? options.send(m,args) : super
|
|
194
|
+
end
|
|
195
|
+
end
|
|
196
|
+
|
|
197
|
+
end
|
|
198
|
+
|
|
199
|
+
end
|
|
@@ -0,0 +1,492 @@
|
|
|
1
|
+
=begin rdoc
|
|
2
|
+
The basic master for PoolParty
|
|
3
|
+
=end
|
|
4
|
+
module PoolParty
|
|
5
|
+
class Master < Remoting
|
|
6
|
+
include Aska
|
|
7
|
+
include Callbacks
|
|
8
|
+
# ############################
|
|
9
|
+
include Remoter
|
|
10
|
+
# ############################
|
|
11
|
+
include FileWriter
|
|
12
|
+
|
|
13
|
+
def initialize
|
|
14
|
+
super
|
|
15
|
+
|
|
16
|
+
self.class.send :rules, :contract_when, Application.options.contract_when unless are_rules?(:contract_when)
|
|
17
|
+
self.class.send :rules, :expand_when, Application.options.expand_when unless are_rules?(:expand_when)
|
|
18
|
+
end
|
|
19
|
+
# Start the cloud
|
|
20
|
+
def start_cloud!
|
|
21
|
+
start!
|
|
22
|
+
end
|
|
23
|
+
alias_method :start_cloud, :start_cloud!
|
|
24
|
+
# Start the cloud, which launches the minimum_instances
|
|
25
|
+
def start!
|
|
26
|
+
message "Launching minimum_instances"
|
|
27
|
+
launch_minimum_instances
|
|
28
|
+
message "Waiting for master to boot up"
|
|
29
|
+
|
|
30
|
+
wait_for_all_instances_to_boot
|
|
31
|
+
|
|
32
|
+
setup_cloud
|
|
33
|
+
end
|
|
34
|
+
def setup_cloud
|
|
35
|
+
install_cloud
|
|
36
|
+
configure_cloud
|
|
37
|
+
end
|
|
38
|
+
alias_method :start, :start!
|
|
39
|
+
def wait_for_all_instances_to_boot
|
|
40
|
+
reset!
|
|
41
|
+
while !number_of_pending_instances.zero?
|
|
42
|
+
wait "2.seconds" unless Application.test?
|
|
43
|
+
waited = true
|
|
44
|
+
reset!
|
|
45
|
+
end
|
|
46
|
+
unless Application.test? || waited.nil?
|
|
47
|
+
message "Give some time for the instance ssh to start up"
|
|
48
|
+
wait "15.seconds"
|
|
49
|
+
end
|
|
50
|
+
end
|
|
51
|
+
def wait_for_all_instances_to_terminate
|
|
52
|
+
reset!
|
|
53
|
+
while !list_of_terminating_instances.size.zero?
|
|
54
|
+
wait "2.seconds" unless Application.test?
|
|
55
|
+
waited = true
|
|
56
|
+
reset!
|
|
57
|
+
end
|
|
58
|
+
unless Application.test? || waited.nil?
|
|
59
|
+
message "Give some time for the instance ssh to start up"
|
|
60
|
+
wait "15.seconds"
|
|
61
|
+
end
|
|
62
|
+
reset!
|
|
63
|
+
end
|
|
64
|
+
# Configure the master because the master will take care of the rest after that
|
|
65
|
+
def configure_cloud
|
|
66
|
+
message "Configuring master"
|
|
67
|
+
build_and_send_config_files_in_temp_directory
|
|
68
|
+
remote_configure_instances
|
|
69
|
+
|
|
70
|
+
nodes.each do |node|
|
|
71
|
+
node.configure
|
|
72
|
+
end
|
|
73
|
+
end
|
|
74
|
+
before :install_cloud, :add_ssh_key
|
|
75
|
+
after :configure_cloud, :remove_ssh_key
|
|
76
|
+
def add_ssh_key(i)
|
|
77
|
+
Kernel.system("ssh-add #{Application.keypair_path} >/dev/null 2>/dev/null")
|
|
78
|
+
end
|
|
79
|
+
def remove_ssh_key(i)
|
|
80
|
+
Kernel.system("ssh-add -d #{Application.keypair_name} >/dev/null 2>/dev/null")
|
|
81
|
+
end
|
|
82
|
+
def install_cloud(bool=false)
|
|
83
|
+
if Application.install_on_load? || bool
|
|
84
|
+
# Just in case, add the new ubuntu apt-sources as well as updating and fixing the
|
|
85
|
+
# update packages.
|
|
86
|
+
update_apt_string =<<-EOE
|
|
87
|
+
touch /etc/apt/sources.list
|
|
88
|
+
echo 'deb http://mirrors.kernel.org/ubuntu hardy main universe' >> /etc/apt/sources.list
|
|
89
|
+
apt-get update --fix-missing
|
|
90
|
+
EOE
|
|
91
|
+
|
|
92
|
+
ssh(update_apt_string)
|
|
93
|
+
|
|
94
|
+
Provider.install_poolparty
|
|
95
|
+
|
|
96
|
+
# For plugins
|
|
97
|
+
nodes.each do |node|
|
|
98
|
+
node.install
|
|
99
|
+
end
|
|
100
|
+
|
|
101
|
+
end
|
|
102
|
+
end
|
|
103
|
+
def cloud_ips
|
|
104
|
+
@ips ||= nodes.collect {|a| a.ip }
|
|
105
|
+
end
|
|
106
|
+
# Launch the minimum number of instances.
|
|
107
|
+
def launch_minimum_instances
|
|
108
|
+
request_launch_new_instances(Application.minimum_instances - number_of_pending_and_running_instances)
|
|
109
|
+
nodes
|
|
110
|
+
end
|
|
111
|
+
# Start monitoring the cloud with the threaded loop
|
|
112
|
+
def start_monitor!
|
|
113
|
+
begin
|
|
114
|
+
trap("INT") do
|
|
115
|
+
on_exit
|
|
116
|
+
exit
|
|
117
|
+
end
|
|
118
|
+
# Daemonize only if we are not in the test environment
|
|
119
|
+
run_thread_loop(:daemonize => !Application.test?) do
|
|
120
|
+
add_task {PoolParty.message "Checking cloud"}
|
|
121
|
+
add_task {launch_minimum_instances}
|
|
122
|
+
add_task {reconfigure_cloud_when_necessary}
|
|
123
|
+
add_task {scale_cloud!}
|
|
124
|
+
add_task {check_stats}
|
|
125
|
+
end
|
|
126
|
+
rescue Exception => e
|
|
127
|
+
Process.kill("HUP", Process.pid)
|
|
128
|
+
end
|
|
129
|
+
end
|
|
130
|
+
alias_method :start_monitor, :start_monitor!
|
|
131
|
+
def user_tasks
|
|
132
|
+
end
|
|
133
|
+
# Sole purpose to check the stats, mainly in a plugin
|
|
134
|
+
def check_stats
|
|
135
|
+
str = registered_monitors.collect {|m| "#{m}"}
|
|
136
|
+
PoolParty.message "Monitors: #{str.join(", ")}"
|
|
137
|
+
end
|
|
138
|
+
# Add an instance if the cloud needs one ore terminate one if necessary
|
|
139
|
+
def scale_cloud!
|
|
140
|
+
add_instance_if_load_is_high
|
|
141
|
+
terminate_instance_if_load_is_low
|
|
142
|
+
end
|
|
143
|
+
alias_method :scale_cloud, :scale_cloud!
|
|
144
|
+
# Tough method:
|
|
145
|
+
# We need to make sure that all the instances have the required software installed
|
|
146
|
+
# This is a basic check against the local store of the instances that have the
|
|
147
|
+
# stack installed.
|
|
148
|
+
def reconfigure_cloud_when_necessary
|
|
149
|
+
PoolParty.message "#{number_of_unconfigured_nodes} unconfigured nodes"
|
|
150
|
+
configure_cloud if number_of_unconfigured_nodes > 0
|
|
151
|
+
end
|
|
152
|
+
def number_of_unconfigured_nodes
|
|
153
|
+
# TODO: Find a better way to tell if the nodes are configured.
|
|
154
|
+
nodes.reject {|a| a.stack_installed? }.size
|
|
155
|
+
end
|
|
156
|
+
def grow_by(num=1)
|
|
157
|
+
request_launch_new_instances(num)
|
|
158
|
+
|
|
159
|
+
wait_for_all_instances_to_boot
|
|
160
|
+
|
|
161
|
+
reset!
|
|
162
|
+
configure_cloud
|
|
163
|
+
end
|
|
164
|
+
def shrink_by(num=1)
|
|
165
|
+
num.times do |i|
|
|
166
|
+
# Get the last node that is not the master
|
|
167
|
+
node = nodes.reject {|a| a.master? }[-1]
|
|
168
|
+
res = request_termination_of_instance(node.instance_id) if node
|
|
169
|
+
PoolParty.message "#{res ? "Could" : "Could not"} shutdown instance"
|
|
170
|
+
end
|
|
171
|
+
wait_for_all_instances_to_terminate
|
|
172
|
+
configure_cloud
|
|
173
|
+
end
|
|
174
|
+
def make_base_tmp_dir(c)
|
|
175
|
+
`mkdir #{base_tmp_dir}` unless File.directory?(base_tmp_dir)
|
|
176
|
+
end
|
|
177
|
+
before :build_and_send_config_files_in_temp_directory, :make_base_tmp_dir
|
|
178
|
+
def build_and_send_config_files_in_temp_directory
|
|
179
|
+
require 'ftools'
|
|
180
|
+
if File.directory?(Application.plugin_dir)
|
|
181
|
+
Kernel.system("tar -czf #{base_tmp_dir}/plugins.tar.gz #{File.basename(Application.plugin_dir)}")
|
|
182
|
+
end
|
|
183
|
+
|
|
184
|
+
if Master.requires_heartbeat?
|
|
185
|
+
build_and_copy_heartbeat_authkeys_file
|
|
186
|
+
File.copy(get_config_file_for("cloud_master_takeover"), "#{base_tmp_dir}/cloud_master_takeover")
|
|
187
|
+
File.copy(get_config_file_for("heartbeat.conf"), "#{base_tmp_dir}/ha.cf")
|
|
188
|
+
end
|
|
189
|
+
|
|
190
|
+
File.copy(Application.config_file, "#{base_tmp_dir}/config.yml") if Application.config_file && File.exists?(Application.config_file)
|
|
191
|
+
File.copy(Application.keypair_path, "#{base_tmp_dir}/keypair") if File.exists?(Application.keypair_path)
|
|
192
|
+
|
|
193
|
+
copy_pem_files_to_tmp_dir
|
|
194
|
+
|
|
195
|
+
copy_config_files_in_directory_to_tmp_dir("config/resource.d")
|
|
196
|
+
# copy_config_files_in_directory_to_tmp_dir("config/monit.d")
|
|
197
|
+
|
|
198
|
+
build_haproxy_file
|
|
199
|
+
Master.build_user_global_files
|
|
200
|
+
|
|
201
|
+
build_nodes_list
|
|
202
|
+
|
|
203
|
+
Master.with_nodes do |node|
|
|
204
|
+
build_hosts_file_for(node)
|
|
205
|
+
build_reconfigure_instances_script_for(node)
|
|
206
|
+
Master.build_user_node_files_for(node)
|
|
207
|
+
|
|
208
|
+
if Master.requires_heartbeat?
|
|
209
|
+
build_heartbeat_config_file_for(node)
|
|
210
|
+
build_heartbeat_resources_file_for(node)
|
|
211
|
+
end
|
|
212
|
+
end
|
|
213
|
+
end
|
|
214
|
+
def copy_pem_files_to_tmp_dir
|
|
215
|
+
%w(EC2_CERT EC2_PRIVATE_KEY).each do |key|
|
|
216
|
+
begin
|
|
217
|
+
file = `echo $#{key}`.strip
|
|
218
|
+
File.copy(file, "#{base_tmp_dir}/#{File.basename(file)}")
|
|
219
|
+
rescue Exception => e
|
|
220
|
+
end
|
|
221
|
+
end
|
|
222
|
+
end
|
|
223
|
+
def cleanup_tmp_directory(c)
|
|
224
|
+
Dir["#{base_tmp_dir}/*"].each {|f| FileUtils.rm_rf f} if File.directory?("tmp/")
|
|
225
|
+
end
|
|
226
|
+
before :build_and_send_config_files_in_temp_directory, :cleanup_tmp_directory
|
|
227
|
+
# Send the files to the nodes
|
|
228
|
+
def send_config_files_to_nodes(c)
|
|
229
|
+
run_array_of_tasks(rsync_tasks("#{base_tmp_dir}/*", "#{remote_base_tmp_dir}"))
|
|
230
|
+
end
|
|
231
|
+
after :build_and_send_config_files_in_temp_directory, :send_config_files_to_nodes
|
|
232
|
+
def remote_configure_instances
|
|
233
|
+
arr = []
|
|
234
|
+
Master.with_nodes do |node|
|
|
235
|
+
script_file = "#{remote_base_tmp_dir}/#{node.name}-configuration"
|
|
236
|
+
str=<<-EOC
|
|
237
|
+
chmod +x #{script_file}
|
|
238
|
+
/bin/sh #{script_file}
|
|
239
|
+
EOC
|
|
240
|
+
arr << "#{self.class.ssh_string} #{node.ip} '#{str.strip.runnable}'"
|
|
241
|
+
end
|
|
242
|
+
run_array_of_tasks(arr)
|
|
243
|
+
end
|
|
244
|
+
# Add an instance if the load is high
|
|
245
|
+
def add_instance_if_load_is_high
|
|
246
|
+
if expand?
|
|
247
|
+
PoolParty.message "Cloud needs expansion"
|
|
248
|
+
grow_by(1)
|
|
249
|
+
end
|
|
250
|
+
end
|
|
251
|
+
alias_method :add_instance, :add_instance_if_load_is_high
|
|
252
|
+
# Teardown an instance if the load is pretty low
|
|
253
|
+
def terminate_instance_if_load_is_low
|
|
254
|
+
if contract?
|
|
255
|
+
PoolParty.message "Cloud to shrink"
|
|
256
|
+
shrink_by(1)
|
|
257
|
+
end
|
|
258
|
+
end
|
|
259
|
+
alias_method :terminate_instance, :terminate_instance_if_load_is_low
|
|
260
|
+
# FOR MONITORING
|
|
261
|
+
def contract?
|
|
262
|
+
valid_rules?(:contract_when)
|
|
263
|
+
end
|
|
264
|
+
def expand?
|
|
265
|
+
valid_rules?(:expand_when)
|
|
266
|
+
end
|
|
267
|
+
# Restart the running instances services with monit on all the nodes
|
|
268
|
+
def restart_running_instances_services
|
|
269
|
+
nodes.each do |node|
|
|
270
|
+
node.restart_with_monit
|
|
271
|
+
end
|
|
272
|
+
end
|
|
273
|
+
# Build the basic haproxy config file from the config file in the config directory and return a tempfile
|
|
274
|
+
def build_haproxy_file
|
|
275
|
+
write_to_file_for("haproxy") do
|
|
276
|
+
servers=<<-EOS
|
|
277
|
+
#{nodes.collect {|node| node.haproxy_entry}.join("\n")}
|
|
278
|
+
EOS
|
|
279
|
+
open(Application.haproxy_config_file).read.strip ^ {:servers => servers, :host_port => Application.host_port}
|
|
280
|
+
end
|
|
281
|
+
end
|
|
282
|
+
# Build host file for a specific node
|
|
283
|
+
def build_hosts_file_for(n)
|
|
284
|
+
write_to_file_for("hosts", n) do
|
|
285
|
+
"#{nodes.collect {|node| node.ip == n.ip ? node.local_hosts_entry : node.hosts_entry}.join("\n")}"
|
|
286
|
+
end
|
|
287
|
+
end
|
|
288
|
+
def build_nodes_list
|
|
289
|
+
write_to_file_for(RemoteInstance.node_list_name) do
|
|
290
|
+
"#{cloud_ips.join("\n")}"
|
|
291
|
+
end
|
|
292
|
+
end
|
|
293
|
+
# Build the basic auth file for the heartbeat
|
|
294
|
+
def build_and_copy_heartbeat_authkeys_file
|
|
295
|
+
write_to_file_for("authkeys") do
|
|
296
|
+
open(Application.heartbeat_authkeys_config_file).read
|
|
297
|
+
end
|
|
298
|
+
end
|
|
299
|
+
# Build heartbeat config file
|
|
300
|
+
def build_heartbeat_config_file_for(node)
|
|
301
|
+
write_to_file_for("heartbeat", node) do
|
|
302
|
+
servers = "#{node.node_entry}\n#{get_next_node(node).node_entry}" rescue ""
|
|
303
|
+
open(Application.heartbeat_config_file).read.strip ^ {:nodes => servers}
|
|
304
|
+
end
|
|
305
|
+
end
|
|
306
|
+
def build_heartbeat_resources_file_for(node)
|
|
307
|
+
write_to_file_for("haresources", node) do
|
|
308
|
+
"#{node.haproxy_resources_entry}\n#{get_next_node(node).haproxy_resources_entry}" rescue ""
|
|
309
|
+
end
|
|
310
|
+
end
|
|
311
|
+
# Build basic configuration script for the node
|
|
312
|
+
def build_reconfigure_instances_script_for(node)
|
|
313
|
+
write_to_file_for("configuration", node) do
|
|
314
|
+
open(Application.sh_reconfigure_instances_script).read.strip ^ node.configure_tasks( !PoolParty.verbose? )
|
|
315
|
+
end
|
|
316
|
+
end
|
|
317
|
+
|
|
318
|
+
# Try the user's directory before the master directory
|
|
319
|
+
def get_config_file_for(name)
|
|
320
|
+
if File.exists?("#{user_dir}/config/#{name}")
|
|
321
|
+
"#{user_dir}/config/#{name}"
|
|
322
|
+
else
|
|
323
|
+
"#{root_dir}/config/#{name}"
|
|
324
|
+
end
|
|
325
|
+
end
|
|
326
|
+
# Copy all the files in the directory to the dest
|
|
327
|
+
def copy_config_files_in_directory_to_tmp_dir(dir)
|
|
328
|
+
dest_dir = "#{base_tmp_dir}/#{File.basename(dir)}"
|
|
329
|
+
FileUtils.mkdir_p dest_dir
|
|
330
|
+
|
|
331
|
+
if File.directory?("#{user_dir}/#{dir}")
|
|
332
|
+
Dir["#{user_dir}/#{dir}/*"].each do |file|
|
|
333
|
+
File.copy(file, dest_dir)
|
|
334
|
+
end
|
|
335
|
+
else
|
|
336
|
+
Dir["#{root_dir}/#{dir}/*"].each do |file|
|
|
337
|
+
File.copy(file, dest_dir)
|
|
338
|
+
end
|
|
339
|
+
end
|
|
340
|
+
end
|
|
341
|
+
# Return a list of the nodes and cache them
# Wraps every non-terminated instance in a RemoteInstance, numbered by its
# position in the listing. Memoized in @nodes; cleared by reset!.
def nodes
  @nodes ||= begin
    running = list_of_nonterminated_instances
    running.collect_with_index do |inst_hash, idx|
      RemoteInstance.new(inst_hash.merge(:number => idx))
    end
  end
end
|
|
347
|
+
# Return a list of the nodes for each keypair and cache them
|
|
348
|
+
def cloud_nodes
|
|
349
|
+
@cloud_nodes ||= begin
|
|
350
|
+
nodes_list = []
|
|
351
|
+
cloud_keypairs.each {|keypair|
|
|
352
|
+
list_of_nonterminated_instances(list_of_instances(keypair)).collect_with_index { |inst, i|
|
|
353
|
+
nodes_list << RemoteInstance.new(inst.merge({:number => i}))
|
|
354
|
+
}
|
|
355
|
+
}
|
|
356
|
+
nodes_list
|
|
357
|
+
end
|
|
358
|
+
end
|
|
359
|
+
# Get the node at the specific index from the cached nodes
# Returns the cached node whose number matches +i+, or nil when no node
# carries that number.
def get_node(i=0)
  wanted = i.to_i
  nodes.detect { |candidate| candidate.number == wanted }
end
|
|
363
|
+
# Get the next node in sequence, so we can configure heartbeat to monitor the next node
# Returns the node after +node+ in the ring, wrapping back to node 0 after
# the last one.
def get_next_node(node)
  neighbour_index = node.number + 1
  neighbour_index = 0 if neighbour_index >= nodes.size
  get_node(neighbour_index)
end
|
|
369
|
+
# On exit command
# Intentionally empty hook; presumably a place for plugins/subclasses to
# run cleanup when the master exits — NOTE(review): no overrides visible
# in this chunk, confirm intent before removing.
def on_exit
end
|
|
372
|
+
# List the clouds
# Returns a human-readable listing of this cloud: a header with the count
# of pending/running instances followed by one description line per node,
# or "Cloud is not running" when nothing is up.
def list
  if number_of_pending_and_running_instances > 0
    summary = "-- CLOUD (#{number_of_pending_and_running_instances})--\n"
    descriptions = nodes.collect { |node| node.description }
    summary << descriptions.join("\n")
  else
    summary = "Cloud is not running"
  end
  summary
end
|
|
382
|
+
# Returns a human-readable listing of nodes across every keypair, grouped
# under a "key pair: <name> (<count>)" header that is emitted whenever the
# keypair changes while walking the (keypair-ordered) cloud_nodes list.
# Returns "Clouds are not running" when nothing is up.
def clouds_list
  if number_of_all_pending_and_running_instances > 0
    out = "-- ALL CLOUDS (#{number_of_all_pending_and_running_instances})--\n"
    keypair = nil
    out << cloud_nodes.collect {|node|
      str = ""
      if keypair != node.keypair
        keypair = node.keypair
        str = "key pair: #{keypair} (#{number_of_pending_and_running_instances(keypair)})\n"
      end
      str += "\t" + node.description if !node.description.nil?
      # BUG FIX: the modifier-if above evaluates to nil when description is
      # nil, which made the whole block return nil and silently drop the
      # keypair header built just before; always return the accumulated
      # string instead.
      str
    }.join("\n")
  else
    out = "Clouds are not running"
  end
  out
end
|
|
399
|
+
# Reset and clear the caches
# Drops all memoized state (descriptions and both node lists) so the next
# accessor call re-queries the cloud.
def reset!
  @cached_descriptions = @nodes = @cloud_nodes = nil
end
|
|
405
|
+
|
|
406
|
+
class << self
|
|
407
|
+
include PoolParty
|
|
408
|
+
include FileWriter
|
|
409
|
+
|
|
410
|
+
# Yields every node of a freshly-built Master instance to the given block.
def with_nodes(&block)
  new.nodes.each(&block)
end
|
|
413
|
+
|
|
414
|
+
# Maps the given block over every node of a freshly-built Master instance
# and returns the resulting array.
def collect_nodes(&block)
  new.nodes.collect(&block)
end
|
|
417
|
+
|
|
418
|
+
# Heartbeat is only needed when there is more than one node to monitor.
def requires_heartbeat?
  new.nodes.length > 1
end
|
|
421
|
+
# Shells out to ping the master node's IP once.
# NOTE(review): despite the ? name this returns ping's stdout (a String,
# always truthy), not a boolean — callers needing the real outcome should
# check $?.success?; confirm before relying on the return value.
# NOTE(review): -t means "timeout in seconds" on BSD/macOS ping but "TTL"
# on Linux ping — behavior is platform-dependent; verify target platform.
def is_master_responding?
  `ping -c1 -t5 #{get_master.ip}`
end
|
|
424
|
+
# The master is, by convention, the first node (number 0) in the list.
def get_master
  new.nodes.first
end
|
|
427
|
+
# Class-level convenience: delegates to a fresh instance's cloud_ips.
def cloud_ips
  instance = new
  instance.cloud_ips
end
|
|
430
|
+
# Class-level convenience wrapper around the instance-level get_next_node.
def get_next_node(node)
  instance = new
  instance.get_next_node(node)
end
|
|
433
|
+
# Configures a remote-task object with the cloud's ssh/rsync settings and
# registers every running node as an :app host.
#
# c          - the default remote-task/configuration object
# remotetask - optional override; used instead of +c+ when given
def set_hosts(c, remotetask=nil)
  # BUG FIX: rt was only assigned when remotetask was supplied, so calling
  # set_hosts without one raised NoMethodError on nil; fall back to the
  # first argument, which was otherwise unused.
  rt = remotetask.nil? ? c : remotetask

  ssh_location = `which ssh`.gsub(/\n/, '')
  # (an unused `which rsync` lookup was removed here; rsync_flags below
  # only embeds the ssh binary's path)

  rt.set :user, Application.username
  # rt.set :domain, "#{Application.user}@#{ip}"
  rt.set :application, Application.app_name
  rt.set :ssh_flags, "-i #{Application.keypair_path} -o StrictHostKeyChecking=no"
  rt.set :rsync_flags , ['-azP', '--delete', "-e '#{ssh_location} -l #{Application.username} -i #{Application.keypair_path} -o StrictHostKeyChecking=no'"]

  # The deploy domain is the master node's IP, when a master exists.
  master = get_master
  rt.set :domain, "#{master.ip}" if master
  # Register each running node as an :app host for the remote task.
  Master.with_nodes { |node|
    rt.host "#{Application.username}@#{node.ip}", :app if node.status =~ /running/
  }
end
|
|
452
|
+
|
|
453
|
+
# Builds the shell command string run over ssh to (re)configure +node+:
# update its plugins, run `pool maintain`, set its hostname, and mount the
# shared S3 bucket at /data via s3fs.
# NOTE(review): the s3fs line puts AWS credentials on the command line,
# which is visible in the node's process listing — confirm acceptable.
def ssh_configure_string_for(node)
  # Heredoc content is executed remotely verbatim; interpolations are
  # resolved here on the master.
  cmd=<<-EOC
#{node.update_plugin_string(node)}
pool maintain -c ~/.config -l #{PoolParty.plugin_dir}
hostname -v #{node.name}
/usr/bin/s3fs #{Application.shared_bucket} -o accessKeyId=#{Application.access_key} -o secretAccessKey=#{Application.secret_access_key} -o nonempty /data
  EOC
end
|
|
461
|
+
# Renders the haproxy configuration: collects each node's haproxy entry
# into a server list and substitutes it (plus the host port) into the
# haproxy config template via the project's String#^ templating helper.
def build_haproxy_file
  servers=<<-EOS
#{collect_nodes {|node| node.haproxy_entry}.join("\n")}
  EOS
  open(Application.haproxy_config_file).read.strip ^ {:servers => servers, :host_port => Application.host_port}
end
|
|
467
|
+
|
|
468
|
+
# Placeholders
# Writes every globally-registered user file (see define_global_user_file),
# passing the content block captured at definition time to write_to_file_for.
def build_user_global_files
  global_user_files.each do |name, content_block|
    # BUG FIX: `write_to_file_for(arr[0]) &arr[1]` parsed as a binary `&`
    # on the call's return value instead of passing the block, raising
    # NoMethodError at runtime; use the explicit block-pass argument form.
    write_to_file_for(name, &content_block)
  end
end
|
|
474
|
+
# Writes every per-node user file registered via define_node_user_file,
# invoking each stored block with +node+ to produce that file's content.
def build_user_node_files_for(node)
  user_node_files.each do |name, content_block|
    write_to_file_for(name, node) do
      content_block.call(node)
    end
  end
end
|
|
481
|
+
# Registers a named global user file; +block+ supplies its content when
# build_user_global_files runs.
def define_global_user_file(name, &block)
  global_user_files.push([name, block])
end
|
|
484
|
+
# Lazily-initialized registry of [name, block] pairs for global user files.
def global_user_files
  @global_user_files ||= []
end
|
|
485
|
+
# Registers a named per-node user file; +block+ is later called with each
# node by build_user_node_files_for to produce the content.
def define_node_user_file(name, &block)
  user_node_files.push([name, block])
end
|
|
488
|
+
# Lazily-initialized registry of [name, block] pairs for per-node user files.
def user_node_files
  @user_node_files ||= []
end
|
|
489
|
+
end
|
|
490
|
+
|
|
491
|
+
end
|
|
492
|
+
end
|