haas 0.0.5

checksums.yaml.gz ADDED
@@ -0,0 +1,7 @@
+ ---
+ SHA1:
+   metadata.gz: 6e8a464110c902c14ef830dc3a2171248f3aa258
+   data.tar.gz: 760ca88c7ee422e94b7be779b258d142f1381730
+ SHA512:
+   metadata.gz: 35e900d38e69ca79d7a20c66e27fb2d598b6abe001396bbaa29b85eea601f1165012bcfd10d340f8bc96a2d344480e712e049350af71393d9c0e883bc3269864
+   data.tar.gz: 671575fd90a0a218452f868e580d2f164364eb8baa53ce156b3ce07d4d1ad521faf8e9215f438dd8f4926318adfbcce432994bb51acd30d7b31eeffb2167cf24
bin/haas ADDED
@@ -0,0 +1,58 @@
+ #!/usr/bin/env ruby
+ require 'rubygems'
+ require 'haas'
+ require 'mixlib/cli'
+
+ class HaasCLI
+   include Mixlib::CLI
+
+   option :launch,
+     :long => "--launch",
+     :description => "Launch and install a cluster"
+
+   option :nb_instances,
+     :long => "--nb-instances COUNT",
+     :description => "The number of instances you want to launch",
+     :default => "5"
+
+   option :instance_type,
+     :long => "--instance-type TYPE",
+     :description => "The type of the EC2 instance you want to launch",
+     :default => "m3.large"
+
+   option :aws_region,
+     :long => "--aws-region REGION",
+     :description => "The AWS region used",
+     :default => "us-east-1"
+
+   option :list,
+     :long => "--list",
+     :description => "Show all the configured clusters and instances"
+
+   option :terminate,
+     :long => "--terminate",
+     :description => "Terminate the cluster"
+
+   option :help,
+     :short => "-h",
+     :long => "--help",
+     :description => "Show this message",
+     :on => :tail,
+     :boolean => true,
+     :show_options => true,
+     :exit => 0
+
+ end
+
+ cli = HaasCLI.new
+ cli.parse_options
+
+ Haas::Config.set_options cli.config
+
+ if cli.config[:launch]
+   Haas.launch
+ elsif cli.config[:list]
+   Haas.show
+ elsif cli.config[:terminate]
+   Haas.terminate Haas::Cluster.last
+ end
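For reference, the options above translate into invocations like the following — a sketch; `haas` is the gem's installed executable, and the AWS credentials are read from the environment when `Haas::Aws.connect` runs (see lib/haas/aws.rb below), with the values here being placeholders:

  export AWS_KEY=...  AWS_SECRET=...
  haas --launch --nb-instances 3 --instance-type m3.large --aws-region us-east-1
  haas --list
  haas --terminate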
lib/haas.rb ADDED
@@ -0,0 +1,67 @@
+ require 'active_record'
+ require 'haas/config'
+ require 'models/key_pair'
+ require 'models/node'
+ require 'models/cluster'
+ require 'haas/aws'
+ require 'haas/chef'
+ require 'haas/blueprints'
+ require 'haas/utils'
+
+ class Haas
+   def self.launch
+     Haas::Aws.connect
+     @cluster = Haas::Cluster.create(
+       :aws_region => Haas::Aws.region,
+       :ssh_user => "root"
+     )
+     if Haas::Aws.nb_instance_available >= Haas::Config.options[:nb_instances].to_i
+       Haas::Aws.create_key_pair
+       Haas::Aws.launch_instances
+     else
+       puts "There are not enough instances available.\nYou can request a limit increase here: https://aws.amazon.com/support/createCase?serviceLimitIncreaseType=ec2-instances&type=service_limit_increase"
+       exit
+     end
+
+     Haas::ChefProvider.setup_cluster
+     Haas::Blueprints.post_blueprints
+
+     puts "\n"
+     puts "=========== installation report =============="
+     puts "Ambari is finalizing the installation"
+     puts "You can access Ambari to manage your cluster at the following address:"
+     puts "http://#{@cluster.get_ambari_server.public_dns_name}:8080/"
+     puts "user: admin"
+     puts "password: admin"
+     puts "\n"
+     puts "Nodes of the cluster:"
+     @cluster.nodes.each do |node|
+       puts "  #{node.public_dns_name}"
+     end
+     puts "\n"
+     puts "You can use this SSH key to log into each node as user #{@cluster.ssh_user}"
+     puts @cluster.identity_file_path
+   end
+
+   def self.show
+     Haas::Cluster.all.each do |cluster|
+       puts "Cluster - #{cluster.name}"
+       cluster.nodes.each do |node|
+         puts "  #{node.instance_id} - #{node.public_ip_address} - #{node.private_ip_address}"
+       end
+     end
+   end
+
+   def self.terminate cluster
+     Haas::Aws.connect
+     Haas::Aws.terminate_cluster cluster
+   end
+
+   def self.cluster
+     return @cluster
+   end
+
+   def self.set_cluster cluster
+     @cluster = cluster
+   end
+ end
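A minimal sketch of driving the library directly, without the bin/haas wrapper — the option keys and string values mirror the Mixlib::CLI defaults defined above:

  require 'haas'
  Haas::Config.set_options(
    :nb_instances => "3",        # parsed with .to_i inside Haas.launch
    :instance_type => "m3.large",
    :aws_region => "us-east-1"
  )
  Haas.launch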
lib/haas/aws.rb ADDED
@@ -0,0 +1,145 @@
+ require 'aws-sdk'
+
+ class Haas
+   class Aws
+     CENTOS_IMAGES = {
+       "6.5" => {
+         "us-east-1"=>"ami-8997afe0",
+         "us-west-2"=>"ami-b6bdde86",
+         "us-west-1"=>"ami-1a013c5f",
+         "eu-west-1"=>"ami-42718735",
+         "ap-southeast-1"=>"ami-a08fd9f2",
+         "ap-southeast-2"=>"ami-e7138ddd",
+         "ap-northeast-1"=>"ami-81294380",
+         "sa-east-1"=>"ami-7d02a260"
+       },
+       "7" => {
+         "us-east-1"=>"ami-96a818fe",
+         "us-west-2"=>"ami-c7d092f7",
+         "us-west-1"=>"ami-6bcfc42e",
+         "eu-west-1"=>"ami-e4ff5c93",
+         "ap-southeast-1"=>"ami-aea582fc",
+         "ap-southeast-2"=>"ami-bd523087",
+         "ap-northeast-1"=>"ami-89634988",
+         "sa-east-1"=>"ami-bf9520a2"
+       }
+     }
+
+     def self.connect
+       @region = Haas::Config.options[:aws_region] || 'us-east-1'
+       AWS.config(
+         access_key_id: ENV['AWS_KEY'],
+         secret_access_key: ENV['AWS_SECRET'],
+         region: region
+       )
+       @ec2 = AWS::EC2.new
+     end
+
+     def self.ec2
+       @ec2
+     end
+
+     def self.region
+       @region
+     end
+
+     # Compare the account's EC2 instance limit with what is already running.
+     def self.nb_instance_available
+       account_attributes = ec2.client.describe_account_attributes\
+         .data[:account_attribute_set]\
+         .inject({}) do |m, i|
+           m[i[:attribute_name]] = i[:attribute_value_set].first[:attribute_value]; m
+         end
+
+       max_instances = account_attributes["max-instances"].to_i
+       return max_instances - nb_running_instances
+     end
+
+     def self.nb_running_instances
+       ec2.instances.count { |i| i.status == :running }
+     end
+
+     def self.create_key_pair
+       key_pair = Haas::KeyPair.create(name: Haas.cluster.name)
+       File.write(Haas.cluster.identity_file_path, key_pair.private_key)
+       File.chmod(0600, Haas.cluster.identity_file_path)
+     end
+
+     def self.launch_instances
+       image_id = CENTOS_IMAGES["6.5"][region]
+
+       if !ec2.security_groups.filter('group-name', 'haas-security-group').first
+         security_group = ec2.security_groups.create('haas-security-group')
+         security_group.authorize_ingress(:tcp, 22)
+         security_group.authorize_ingress(:tcp, 80)
+         security_group.authorize_ingress(:tcp, 443)
+         security_group.authorize_ingress(:tcp, 8080)
+         # Allow unrestricted traffic between nodes of the same group.
+         security_group.authorize_ingress(:tcp, 0..65535, security_group)
+         security_group.authorize_ingress(:udp, 0..65535, security_group)
+         security_group.authorize_ingress(:icmp, -1, security_group)
+       end
+
+       instances = ec2.instances.create({
+         :image_id => image_id,
+         :instance_type => Haas::Config.options[:instance_type],
+         :key_name => Haas.cluster.name,
+         :security_groups => ['haas-security-group'],
+         :block_device_mappings => [
+           {
+             :device_name => "/dev/sda",
+             :ebs => {
+               :volume_size => 8, # 8 GiB
+               :delete_on_termination => true
+             }
+           },
+           {
+             :device_name => "/dev/sdf",
+             :virtual_name => "ephemeral0"
+           }
+         ],
+         :count => Haas::Config.options[:nb_instances].to_i
+       })
+
+       print "Waiting for the instances to start "
+       while instances.any? { |i| i.status == :pending } do
+         print '.'
+         sleep 1
+       end
+       puts " done"
+
+       print "Waiting for the instances to be initialized and accessible "
+       while !is_cluster_ssh_open?(instances) do
+         print '.'
+         sleep 1
+       end
+       puts " done"
+
+       instances.each do |instance|
+         Haas::Node.create(
+           cluster_id: Haas.cluster.id,
+           instance_id: instance.id,
+           public_ip_address: instance.ip_address,
+           public_dns_name: instance.public_dns_name,
+           private_ip_address: instance.private_ip_address,
+           private_dns_name: instance.private_dns_name
+         )
+       end
+     end
+
+     def self.terminate_cluster cluster
+       ec2.client.terminate_instances({
+         instance_ids: cluster.nodes.map(&:instance_id)
+       })
+       cluster.destroy
+     end
+
+     def self.is_cluster_ssh_open?(instances)
+       instances.each do |instance|
+         return false unless Haas::Utils.is_port_open?(instance.public_dns_name, 22)
+       end
+       return true
+     end
+
+   end
+ end
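The instance-limit check is easiest to read with concrete numbers; a sketch with illustrative values (the real figures come from describe_account_attributes and the account's running instances):

  # Suppose the account attributes include "max-instances" => "20"
  # and 17 instances are currently in the :running state:
  #   nb_instance_available  #=> 20 - 17 = 3
  # A default launch asks for 5 instances, so Haas.launch would print
  # the limit-increase message and exit rather than launch a partial cluster.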
lib/haas/blueprints.rb ADDED
@@ -0,0 +1,137 @@
+ require 'net/http'
+ require 'json'
+
+ class Haas
+   class Blueprints
+
+     def self.post_blueprints
+       ambari = Haas.cluster.get_ambari_server
+       puts "Wait until the Ambari server is launched"
+       while !Haas::Utils.is_port_open?(ambari.public_dns_name, 8080)
+         print '.'
+         sleep 1
+       end
+       puts ' done.'
+
+       puts "Wait 20 seconds for the nodes to connect to Ambari."
+       sleep 20
+       puts "Done."
+
+       post_json(ambari.public_dns_name, 8080, '/api/v1/blueprints/haas-blueprint', get_blueprint)
+       post_json(ambari.public_dns_name, 8080, '/api/v1/clusters/haas-cluster', get_cluster)
+     end
+
+     def self.post_json(host, port, url, params)
+       req = Net::HTTP::Post.new(url)
+       req.body = params.to_json
+       req.basic_auth("admin", "admin")
+       req["X-Requested-By"] = "HaaS"
+       Net::HTTP.new(host, port).start { |http| http.request(req) }
+     end
+
+     def self.get_blueprint
+       {
+         "host_groups" => [
+           {
+             "name" => "master",
+             "configurations" => [
+               {
+                 "global" => {
+                   "nagios_contact" => "me@my-awesome-domain.example"
+                 }
+               }
+             ],
+             "components" => [
+               {
+                 "name" => "NAMENODE"
+               },
+               {
+                 "name" => "SECONDARY_NAMENODE"
+               },
+               {
+                 "name" => "RESOURCEMANAGER"
+               },
+               {
+                 "name" => "HISTORYSERVER"
+               },
+               {
+                 "name" => "NAGIOS_SERVER"
+               },
+               {
+                 "name" => "GANGLIA_SERVER"
+               },
+               {
+                 "name" => "ZOOKEEPER_SERVER"
+               },
+               {
+                 "name" => "GANGLIA_MONITOR"
+               }
+             ],
+             "cardinality" => "1"
+           },
+           {
+             "name" => "slaves",
+             "components" => [
+               {
+                 "name" => "DATANODE"
+               },
+               {
+                 "name" => "HDFS_CLIENT"
+               },
+               {
+                 "name" => "NODEMANAGER"
+               },
+               {
+                 "name" => "YARN_CLIENT"
+               },
+               {
+                 "name" => "MAPREDUCE2_CLIENT"
+               },
+               {
+                 "name" => "ZOOKEEPER_CLIENT"
+               },
+               {
+                 "name" => "GANGLIA_MONITOR"
+               }
+             ],
+             "cardinality" => "1+"
+           }
+         ],
+         "Blueprints" => {
+           "stack_name" => "HDP",
+           "stack_version" => "2.1"
+         }
+       }
+     end
+
+
+     def self.get_cluster
+       masters = []
+       slaves = []
+       nb_masters = 1
+       Haas.cluster.nodes.each do |node|
+         if masters.length < nb_masters
+           masters << { "fqdn" => node.private_dns_name }
+         else
+           slaves << { "fqdn" => node.private_dns_name }
+         end
+       end
+
+       {
+         "blueprint" => "haas-blueprint",
+         "default_password" => "my-super-secret-password",
+         "host_groups" => [
+           {
+             "name" => "master",
+             "hosts" => masters
+           },
+           {
+             "name" => "slaves",
+             "hosts" => slaves
+           }
+         ]
+       }
+     end
+
+   end
+ end
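The two payloads follow Ambari's REST split: get_blueprint declares which components each host group runs, and get_cluster maps concrete hosts into those groups. With two nodes, for example, get_cluster would return something like this (FQDNs illustrative):

  {
    "blueprint" => "haas-blueprint",
    "default_password" => "my-super-secret-password",
    "host_groups" => [
      { "name" => "master", "hosts" => [{ "fqdn" => "ip-10-0-0-1.ec2.internal" }] },
      { "name" => "slaves", "hosts" => [{ "fqdn" => "ip-10-0-0-2.ec2.internal" }] }
    ]
  }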
lib/haas/chef.rb ADDED
@@ -0,0 +1,158 @@
+ class Haas
+   class ChefProvider
+
+     COOKBOOK_PATH = File.join(Haas::Config::WORKING_DIR, 'cookbooks')
+
+     def self.setup_cluster
+       install_chef_server
+       write_knife_config_file
+       cookbooks = [{ 'name' => 'ambari', 'url' => "https://supermarket.getchef.com/cookbooks/ambari/download" }]
+       cookbooks.each do |cb|
+         download_cookbook cb['name'], cb['url']
+       end
+       upload_cookbook
+       setup_environment
+       threads = []
+       Haas.cluster.nodes.each do |node|
+         threads << Thread.new { bootstrap_node(node) }
+       end
+       threads.each { |thr| thr.join }
+     end
+
+     def self.install_chef_server
+       require 'net/ssh'
+       chef_server = Haas.cluster.get_chef_server
+       user = Haas.cluster.ssh_user
+       chef_server_file = "chef-server-core-12.0.0_rc.5-1.el5.x86_64.rpm"
+       chef_server_url = "https://packagecloud.io/chef/stable/download?distro=6&filename=#{chef_server_file}"
+       chef_server_local_path = "/tmp/#{chef_server_file}"
+
+       Net::SSH.start(
+         chef_server.public_dns_name, user,
+         :host_key => "ssh-rsa",
+         :encryption => "blowfish-cbc",
+         :keys => [ Haas.cluster.identity_file_path ],
+         :compression => "zlib"
+       ) do |ssh|
+         puts "Entering chef server installation on the node #{chef_server.public_dns_name}. This may take a while."
+         puts "Disable iptables"
+         ssh.exec!("service iptables stop")
+         puts "Downloading and installing the chef server."
+         # Retry until both the download and the install succeed.
+         ssh.exec!(%{
+           until curl -L '#{chef_server_url}' -o #{chef_server_local_path} && rpm -ivh #{chef_server_local_path}; do
+             echo "installing chef server";
+           done
+         })
+         puts "Configuring chef server."
+         ssh.exec!("mkdir -p /etc/opscode/")
+         ssh.exec!(%{echo "nginx['non_ssl_port'] = false" >> /etc/opscode/chef-server.rb})
+         ssh.exec!("chef-server-ctl reconfigure")
+
+         client_key = ""
+         while !client_key.include?("BEGIN RSA PRIVATE KEY") do
+           client_key = ssh.exec!("chef-server-ctl user-create haas-api HAAS Api haas@ossom.io abc123")
+         end
+         File.write(Haas.cluster.chef_client_pem_path, client_key)
+
+         org_validator_key = ssh.exec!("chef-server-ctl org-create haas Hadoop as a Service --association_user haas-api")
+         File.write(Haas.cluster.chef_validator_pem_path, org_validator_key)
+       end
+     end
+
+     def self.write_knife_config_file
+       conf = %{
+         log_level :info
+         log_location STDOUT
+         node_name "haas-api"
+         client_key "#{Haas.cluster.chef_client_pem_path}"
+         validation_client_name "haas-validator"
+         validation_key "#{Haas.cluster.chef_validator_pem_path}"
+         chef_server_url "https://#{Haas.cluster.get_chef_server.public_dns_name}/organizations/haas"
+         cache_type 'BasicFile'
+         cache_options( :path => "#{ENV['HOME']}/.chef/checksums" )
+         cookbook_path ["#{COOKBOOK_PATH}"]
+         environment "#{Haas.cluster.name}"
+       }
+
+       File.write(Haas.cluster.knife_config_path, conf)
+     end
+
+
+     def self.bootstrap_node node
+       require 'chef'
+       require 'chef/knife'
+       require 'chef/knife/bootstrap'
+       require 'chef/knife/core/bootstrap_context'
+       require 'chef/knife/ssh'
+       require 'net/ssh'
+       require 'net/ssh/multi'
+
+       puts "Bootstrapping node #{node.public_dns_name}"
+
+       user = Haas.cluster.ssh_user
+       run_list = ["recipe[ambari::agent]"]
+       run_list << "recipe[ambari::server]" if node.ambari_server
+
+       Chef::Config.from_file(Haas.cluster.knife_config_path)
+       kb = Chef::Knife::Bootstrap.new
+       kb.config[:ssh_user] = user
+       kb.config[:run_list] = run_list
+       kb.config[:use_sudo] = true
+       kb.config[:identity_file] = Haas.cluster.identity_file_path
+       kb.config[:distro] = 'chef-full'
+       kb.name_args = [node.public_dns_name]
+       kb.run
+     end
+
+     def self.download_cookbook cookbook_name, url
+       require 'open-uri'
+       require 'zlib'
+       require 'archive/tar/minitar'
+
+       cookbooks_dir = File.join(Haas::Config::WORKING_DIR, 'cookbooks')
+       Dir.mkdir(cookbooks_dir) unless File.exist?(cookbooks_dir)
+       archive_path = File.join(cookbooks_dir, "#{cookbook_name}.tar.gz")
+       open(archive_path, 'wb') do |file|
+         file << open(url).read
+       end
+       tgz = Zlib::GzipReader.new(File.open(archive_path, 'rb'))
+       Archive::Tar::Minitar.unpack(tgz, cookbooks_dir)
+     end
+
+     def self.upload_cookbook
+       require 'chef'
+       require 'chef/cookbook_uploader'
+
+       puts "Uploading cookbooks to the chef server."
+
+       Chef::Config.from_file(Haas.cluster.knife_config_path)
+       cookbook_repo = Chef::CookbookLoader.new(COOKBOOK_PATH)
+       cookbook_repo.load_cookbooks
+       cbs = []
+       cookbook_repo.each do |cookbook_name, cookbook|
+         cbs << cookbook
+       end
+       Chef::CookbookUploader.new(cbs, :force => false, :concurrency => 10).upload_cookbooks
+     end
+
+     def self.setup_environment
+       require 'chef/environment'
+       require 'chef/rest'
+       ambari_server = Haas.cluster.get_ambari_server
+
+       override_attributes = {
+         :ambari => {
+           :server_fqdn => ambari_server.private_dns_name
+         }
+       }
+
+       Chef::Config.from_file(Haas.cluster.knife_config_path)
+       environment = Chef::Environment.new
+       environment.name(Haas.cluster.name)
+       environment.description("haas hadoop cluster")
+       environment.override_attributes(override_attributes)
+       environment.save
+     end
+   end
+ end
1
+ class Haas
2
+ class Config
3
+ WORKING_DIR=File.join(File.expand_path('~'), '.haas')
4
+
5
+ def self.set_options options
6
+ @options = options
7
+ end
8
+
9
+ def self.options
10
+ @options
11
+ end
12
+
13
+ # Create Haas folder
14
+
15
+ Dir.mkdir(Haas::Config::WORKING_DIR) unless File.exists?(Haas::Config::WORKING_DIR)
16
+
17
+ ############ create sqlite db in memory ############
18
+
19
+ SQLITE_DB = ENV['SQLITE_DB'] || File.join(Haas::Config::WORKING_DIR,"haas_sqlite3.db")
20
+
21
+ ActiveRecord::Base.establish_connection(
22
+ adapter: "sqlite3",
23
+ database: SQLITE_DB
24
+ )
25
+
26
+ if !File.file?(SQLITE_DB)
27
+ ActiveRecord::Migration.verbose = false
28
+ ActiveRecord::Schema.define do
29
+ create_table :key_pairs do |table|
30
+ table.column :name, :string
31
+ table.column :private_key, :string
32
+ end
33
+ add_index :key_pairs, :name, unique: true
34
+
35
+ create_table :clusters do |table|
36
+ table.column :name, :string
37
+ table.column :aws_region, :string
38
+ table.column :ssh_user, :string
39
+ end
40
+ add_index :clusters, :name, unique: true
41
+
42
+ create_table :nodes do |table|
43
+ table.column :instance_id, :string
44
+ table.column :public_ip_address, :string
45
+ table.column :public_dns_name, :string
46
+ table.column :private_ip_address, :string
47
+ table.column :private_dns_name, :string
48
+ table.column :chef_server, :boolean
49
+ table.column :ambari_server, :boolean
50
+ table.column :cluster_id, :integer
51
+ end
52
+ end
53
+ end
54
+
55
+ end
56
+ end
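The schema block only runs when the database file does not exist yet, so pointing SQLITE_DB at a fresh path gives a clean state — useful for tests. A sketch (path hypothetical):

  ENV['SQLITE_DB'] = '/tmp/haas_test.db'  # must be set before the require below
  require 'haas'                          # connects and creates the new database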
lib/haas/utils.rb ADDED
@@ -0,0 +1,23 @@
+ class Haas
+   class Utils
+
+     def self.is_port_open?(ip, port)
+       require 'socket'
+       require 'timeout'
+       begin
+         Timeout::timeout(1) do
+           begin
+             s = TCPSocket.new(ip, port)
+             s.close
+             return true
+           rescue Errno::ECONNREFUSED, Errno::EHOSTUNREACH
+             return false
+           end
+         end
+       rescue Timeout::Error
+       end
+       return false
+     end
+
+   end
+ end
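A usage sketch: the helper treats refused or unreachable connections, and anything slower than the one-second timeout, as closed (hostname illustrative):

  Haas::Utils.is_port_open?('ec2-203-0-113-10.compute-1.amazonaws.com', 22)
  #=> true once sshd on the node starts answering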
lib/models/cluster.rb ADDED
@@ -0,0 +1,61 @@
+ class Haas
+   class Cluster < ActiveRecord::Base
+     before_create :generate_name
+     after_create :mkdir_cluster_home
+     has_many :nodes, dependent: :destroy
+
+     def mkdir_cluster_home
+       require 'fileutils'
+       FileUtils.mkdir_p self.working_dir_path
+     end
+
+     def working_dir_path
+       File.join(Haas::Config::WORKING_DIR, self.name)
+     end
+
+     def identity_file_path
+       File.join(self.working_dir_path, "ssh-#{self.name}.pem")
+     end
+
+     def chef_client_pem_path
+       File.join(self.working_dir_path, "haas-api.pem")
+     end
+
+     def chef_validator_pem_path
+       File.join(self.working_dir_path, "haas-validator.pem")
+     end
+
+     def knife_config_path
+       File.join(self.working_dir_path, "knife.rb")
+     end
+
+     def generate_name
+       random_str = (0...8).map { (65 + rand(26)).chr }.join
+       self.name = "HAAS-#{random_str}"
+     end
+
+     def get_chef_server
+       chef_server = self.nodes.where('nodes.chef_server = ?', true)
+       if chef_server.first
+         return chef_server.first
+       else
+         node = self.nodes.first
+         node.chef_server = true
+         node.save
+         return node
+       end
+     end
+
+     def get_ambari_server
+       ambari_server = self.nodes.where('nodes.ambari_server = ?', true)
+       if ambari_server.first
+         return ambari_server.first
+       else
+         node = self.nodes.where('nodes.chef_server IS NULL').first
+         node.ambari_server = true
+         node.save
+         return node
+       end
+     end
+   end
+ end
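Role placement here is lazy: the first get_chef_server call pins nodes.first, and get_ambari_server then picks a node whose chef_server column is still NULL, so the two servers land on different machines. On a fresh cluster:

  cluster = Haas::Cluster.last
  cluster.get_chef_server    # marks nodes.first as the chef server and returns it
  cluster.get_ambari_server  # marks a different node as the ambari server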
lib/models/key_pair.rb ADDED
@@ -0,0 +1,12 @@
+ class Haas
+   class KeyPair < ActiveRecord::Base
+     validates_uniqueness_of :name
+     before_create :get_private_key
+
+     def get_private_key
+       collection = AWS::EC2::KeyPairCollection.new
+       key = collection.create self.name
+       self.private_key = key.private_key
+     end
+   end
+ end
lib/models/node.rb ADDED
@@ -0,0 +1,5 @@
+ class Haas
+   class Node < ActiveRecord::Base
+     belongs_to :cluster
+   end
+ end
lib/templates/knife.rb.erb ADDED
@@ -0,0 +1,12 @@
+ current_dir = "<%= ENV["HOME"] %>/.haas"
+
+ log_level :info
+ log_location STDOUT
+ node_name "haas"
+ client_key "#{current_dir}/haas-api.pem"
+ validation_client_name "haas"
+ validation_key "#{current_dir}/haas-validator.pem"
+ chef_server_url "https://192.168.20.12/organizations/haas"
+ cache_type 'BasicFile'
+ cache_options( :path => "#{ENV['HOME']}/.chef/checksums" )
+ cookbook_path ["#{current_dir}/../cookbooks"]
metadata ADDED
@@ -0,0 +1,209 @@
+ --- !ruby/object:Gem::Specification
+ name: haas
+ version: !ruby/object:Gem::Version
+   version: 0.0.5
+ platform: ruby
+ authors:
+ - Julien Pellet
+ autorequire:
+ bindir: bin
+ cert_chain: []
+ date: 2014-11-22 00:00:00.000000000 Z
+ dependencies:
+ - !ruby/object:Gem::Dependency
+   name: activerecord
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '4.1'
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '4.1'
+ - !ruby/object:Gem::Dependency
+   name: sqlite3
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '1.3'
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '1.3'
+ - !ruby/object:Gem::Dependency
+   name: aws-sdk
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '1.55'
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '1.55'
+ - !ruby/object:Gem::Dependency
+   name: net-ssh
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '2.9'
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '2.9'
+ - !ruby/object:Gem::Dependency
+   name: chef
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '11.16'
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '11.16'
+ - !ruby/object:Gem::Dependency
+   name: mixlib-cli
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '1.5'
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '1.5'
+ - !ruby/object:Gem::Dependency
+   name: factory_girl
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '4.4'
+   type: :development
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '4.4'
+ - !ruby/object:Gem::Dependency
+   name: faker
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '1.3'
+   type: :development
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '1.3'
+ - !ruby/object:Gem::Dependency
+   name: rspec
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '2.14'
+   type: :development
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '2.14'
+ - !ruby/object:Gem::Dependency
+   name: simplecov
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '0.8'
+   type: :development
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '0.8'
+ - !ruby/object:Gem::Dependency
+   name: shoulda
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '3.5'
+   type: :development
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '3.5'
+ description: Automatically launch Hadoop or Spark clusters in the cloud
+ email: jp@julienpellet.com
+ executables:
+ - haas
+ extensions: []
+ extra_rdoc_files: []
+ files:
+ - bin/haas
+ - lib/haas.rb
+ - lib/haas/aws.rb
+ - lib/haas/blueprints.rb
+ - lib/haas/chef.rb
+ - lib/haas/config.rb
+ - lib/haas/utils.rb
+ - lib/models/cluster.rb
+ - lib/models/key_pair.rb
+ - lib/models/node.rb
+ - lib/templates/knife.rb.erb
+ homepage: http://github.com/jp/haas
+ licenses:
+ - Apache 2.0
+ metadata: {}
+ post_install_message:
+ rdoc_options: []
+ require_paths:
+ - lib
+ required_ruby_version: !ruby/object:Gem::Requirement
+   requirements:
+   - - ">="
+     - !ruby/object:Gem::Version
+       version: '0'
+ required_rubygems_version: !ruby/object:Gem::Requirement
+   requirements:
+   - - ">="
+     - !ruby/object:Gem::Version
+       version: '0'
+ requirements: []
+ rubyforge_project:
+ rubygems_version: 2.2.2
+ signing_key:
+ specification_version: 4
+ summary: Launch big data clusters in the cloud
+ test_files: []