awscli 0.1.2 → 0.1.3

Sign up to get free protection for your applications and to get access to all the features.
checksums.yaml CHANGED
@@ -1,15 +1,15 @@
1
1
  ---
2
2
  !binary "U0hBMQ==":
3
3
  metadata.gz: !binary |-
4
- MGYzNjYyNTcyZTBiZGQzNjM2MDM5YWIwMDE5ZGJkY2U0OTkwZTgxYQ==
4
+ ZjE2MzQxYjhkMTc5NDc5NzIyMjc2NDNiNzMxYjdiMTI4OThhOWJmNw==
5
5
  data.tar.gz: !binary |-
6
- MGI5OWM2NWI5YzkxNDU2YjcyY2FiZGFjNTcyN2Y1MjA5YWYxZGVlMw==
6
+ YjdlZjExYTBhMjAzNDE1ZjJiY2RlZWMyMjM1N2QyMzMyYjQxZGIyNQ==
7
7
  !binary "U0hBNTEy":
8
8
  metadata.gz: !binary |-
9
- MWJjMmZmZGU0YzI2YzcxOWFiYTlhZGJmZTNiZjE3NDA4ZjliN2M1MzVhY2Ew
10
- ODk5NDA1OTE4YTViYzExNjRjY2M4NTgyODQ4Y2EyMDJkZTQ4ZTVkYTc0M2Rj
11
- MjJhMTE4YjllMWZkYjhjY2U3YmVhZmQ3ZGE1MWZmZTg2NDNlOWI=
9
+ M2VjYWEyOTBkNmZjYThiZTdmOTdhODQxZjc1Zjc0NDM2NmU1MDMwMDMxNTYx
10
+ MGQ2NmZkZjJkODFiN2I4Y2JmNWZjZWI1ZTExYmIyMTQ0NjY0YjI3YzgyMmY1
11
+ MzA1OGRkOTdmN2FlZTAxOWNhY2NiYmJlNzhlN2ZiNTJlMGFkODM=
12
12
  data.tar.gz: !binary |-
13
- ZDIxYjZhZWUyMjg0ZTk2NzI3OWQ3Y2Y4OWI1MWExZDk1OWU3ODcxMmNjYTRi
14
- NmE3ZDA1Yjg5ZWRmMGVlYzlkZTU2N2NlZWVlNDYyMTNlNzc5NzYzOWE3MGEz
15
- MTExODI3YzQyMWRjYzM0OGYyNDNiNTliYzlhMjRkNDAzZjQ5ZTM=
13
+ MmQ2OWM5MGUxNWU4MmJmZWI5NDliMmE3YzNiOWQ5M2M4Nzg3NDZlOTkxNzIw
14
+ N2I2NTY3MDM4N2RmYzRjYTAwNzgwMGJlMTY2YzBmYjUxNWNmNTEzNzhiMDZh
15
+ MGFmOTczMmQ0N2ZhYTlhZjIxMDU3MmM5NDhmMzBlMDZjOWI3ODY=
data/bin/awscli CHANGED
@@ -1,23 +1,23 @@
1
1
  #!/usr/bin/env ruby
2
2
 
3
- #Trap interupt to quit cleanly
4
- Signal.trap("INT") { exit 1 }
3
+ #Trap interrupt to quit cleanly
4
+ Signal.trap('INT') { exit 1 }
5
5
 
6
6
  #load the path if not loaded previously
7
7
  $LOAD_PATH.unshift(File.dirname(__FILE__) + '/../lib') unless $LOAD_PATH.include?(File.dirname(__FILE__) + '/../lib')
8
8
 
9
9
  begin
10
- require "awscli"
10
+ require 'awscli'
11
11
  rescue LoadError
12
- require "rubygems"
13
- require "awscli"
12
+ require 'rubygems'
13
+ require 'awscli'
14
14
  end
15
15
 
16
16
  #Start the cli
17
17
  begin
18
18
  AwsCli::Cli.start
19
19
  rescue Interrupt
20
- puts "Caught Interrupt, Exiting..."
20
+ puts 'Caught Interrupt, Exiting...'
21
21
  rescue Excon::Errors::SocketError
22
22
  puts "Error: Establishing Connection to AWS, #{$!}"
23
23
  rescue Fog::Compute::AWS::Error
@@ -25,7 +25,7 @@ rescue Fog::Compute::AWS::Error
25
25
  rescue Fog::Compute::AWS::NotFound
26
26
  puts "Cannot find Resource: #{$!}"
27
27
  rescue Excon::Errors::InternalServerError
28
- puts "Something went wrong, please try again!."
28
+ puts 'Something went wrong, please try again!.'
29
29
  puts "Error: #{$!}"
30
30
  rescue ArgumentError
31
31
  puts "Cannot recognize the argument passed, #{$!}"
@@ -6,7 +6,7 @@ begin
6
6
  require 'highline/import'
7
7
  require 'yaml'
8
8
  rescue LoadError
9
- puts "Failed to load gems: fog, highline, thor"
9
+ puts 'Failed to load gems: fog, highline, thor'
10
10
  exit 1
11
11
  end
12
12
 
@@ -55,4 +55,6 @@ module AwsCli
55
55
  require 'awscli/cli/iam/policies'
56
56
  require 'awscli/cli/iam/roles'
57
57
  require 'awscli/cli/iam/profiles'
58
+ #EMR
59
+ require 'awscli/cli/emr'
58
60
  end
@@ -2,33 +2,33 @@ module Awscli
2
2
  module As
3
3
 
4
4
  class Activities
5
- def initialize connection, options = {}
6
- @@conn = connection
5
+ def initialize(connection)
6
+ @conn = connection
7
7
  end
8
8
 
9
- def list options
9
+ def list(options)
10
10
  if options[:group_name]
11
- puts @@conn.describe_scaling_activities('AutoScalingGroupName' => options[:group_name]).body['DescribeScalingActivitiesResult']['Activities'].to_yaml
11
+ puts @conn.describe_scaling_activities('AutoScalingGroupName' => options[:group_name]).body['DescribeScalingActivitiesResult']['Activities'].to_yaml
12
12
  else
13
- puts @@conn.describe_scaling_activities.body['DescribeScalingActivitiesResult']['Activities'].to_yaml
13
+ puts @conn.describe_scaling_activities.body['DescribeScalingActivitiesResult']['Activities'].to_yaml
14
14
  end
15
15
  end
16
16
  end
17
17
 
18
18
  class Configurations
19
- def initialize connection, options = {}
20
- @@conn = connection
19
+ def initialize(connection)
20
+ @conn = connection
21
21
  end
22
22
 
23
- def list options
23
+ def list(options)
24
24
  if options[:table]
25
- @@conn.configurations.table([:id, :instance_type, :key_name, :security_groups])
25
+ @conn.configurations.table([:id, :instance_type, :key_name, :security_groups])
26
26
  else
27
- puts @@conn.describe_launch_configurations.body['DescribeLaunchConfigurationsResult']['LaunchConfigurations'].to_yaml
27
+ puts @conn.describe_launch_configurations.body['DescribeLaunchConfigurationsResult']['LaunchConfigurations'].to_yaml
28
28
  end
29
29
  end
30
30
 
31
- def create options
31
+ def create(options)
32
32
  #validate block device mapping and parse it to a hash understandable by fog
33
33
  opts = Marshal.load(Marshal.dump(options))
34
34
  block_device_mapping = Array.new
@@ -37,9 +37,9 @@ module Awscli
37
37
  mapping = Hash.new
38
38
  #parse options
39
39
  abort "Invalid block device mapping format, expecting 'devicename=blockdevice' format" unless group =~ /\S=\S/
40
- device_name, block_device = group.split("=")
41
- abort "Invalid device name, expectiing '/dev/sd[a-z]'" unless device_name =~ /^\/dev\/sd[a-z]$/
42
- abort "Invalud block device format, expecting 'ephemeral[0..3]|none|[snapshot-id]:[volume-size]:[true|false]:[standard|io1[:iops]]'" unless block_device =~ /^(snap-.*|ephemeral\w{1,3}|none|:.*)$/
40
+ device_name, block_device = group.split('=')
41
+ abort "Invalid device name, expecting '/dev/sd[a-z]'" unless device_name =~ /^\/dev\/sd[a-z]$/
42
+ abort "Invalid block device format, expecting 'ephemeral[0..3]|none|[snapshot-id]:[volume-size]:[true|false]:[standard|io1[:iops]]'" unless block_device =~ /^(snap-.*|ephemeral\w{1,3}|none|:.*)$/
43
43
  mapping['DeviceName'] = device_name
44
44
  case block_device
45
45
  when 'none'
@@ -52,7 +52,7 @@ module Awscli
52
52
  mapping['Ebs.VolumeSize'] = volume_size if !volume_size.nil? && !volume_size.empty?
53
53
  mapping['Ebs.DeleteOnTermination'] = delete_on_termination if !delete_on_termination.nil? && !delete_on_termination.empty?
54
54
  else
55
- abort "Cannot validate block_device"
55
+ abort 'Cannot validate block_device'
56
56
  end
57
57
  block_device_mapping << mapping
58
58
  end
@@ -80,15 +80,15 @@ module Awscli
80
80
  opts.reject! { |k| k == 'id' }
81
81
 
82
82
  begin
83
- cfgs = @@conn.create_launch_configuration(options[:image_id], options[:instance_type], options[:id], opts)
83
+ cfgs = @conn.create_launch_configuration(options[:image_id], options[:instance_type], options[:id], opts)
84
84
  puts "Created Launch Configuration, #{options[:id]}"
85
85
  rescue Fog::AWS::AutoScaling::IdentifierTaken
86
86
  puts "A launch configuration already exists with the name #{options[:id]}"
87
87
  end
88
88
  end
89
89
 
90
- def delete cfg_name
91
- cfg = @@conn.configurations.get(cfg_name)
90
+ def delete(cfg_name)
91
+ cfg = @conn.configurations.get(cfg_name)
92
92
  abort "Cannot find launch configuration with name: #{cfg_name}" unless cfg
93
93
  cfg.destroy
94
94
  puts "Deleted Launch Configuration with name: #{cfg_name}"
@@ -96,24 +96,24 @@ module Awscli
96
96
  end
97
97
 
98
98
  class Groups
99
- def initialize connection, options = {}
100
- @@conn = connection
99
+ def initialize(connection)
100
+ @conn = connection
101
101
  end
102
102
 
103
- def list options
103
+ def list(options)
104
104
  if options[:table]
105
- @@conn.groups.table([:id, :launch_configuration_name, :desired_capacity, :min_size, :max_size, :vpc_zone_identifier, :termination_policies])
105
+ @conn.groups.table([:id, :launch_configuration_name, :desired_capacity, :min_size, :max_size, :vpc_zone_identifier, :termination_policies])
106
106
  else
107
107
  #yaml dump
108
- puts @@conn.describe_auto_scaling_groups.body['DescribeAutoScalingGroupsResult']['AutoScalingGroups'].to_yaml
108
+ puts @conn.describe_auto_scaling_groups.body['DescribeAutoScalingGroupsResult']['AutoScalingGroups'].to_yaml
109
109
  end
110
110
  end
111
111
 
112
- def create options
112
+ def create(options)
113
113
  # => validate & parse options
114
114
  opts = Marshal.load(Marshal.dump(options))
115
115
  #launch conf name
116
- abort "Launch configuration name not found: #{options[:launch_configuration_name]}" unless @@conn.configurations.get(options[:launch_configuration_name])
116
+ abort "Launch configuration name not found: #{options[:launch_configuration_name]}" unless @conn.configurations.get(options[:launch_configuration_name])
117
117
  #remove required options from options hash
118
118
  opts.reject! { |k| k == 'id' }
119
119
  opts.reject! { |k| k == 'availability_zones' }
@@ -141,7 +141,7 @@ module Awscli
141
141
  if tags = opts.delete(:tags)
142
142
  parsed_tags = Array.new
143
143
  tags.each do |t|
144
- abort "Invliad tags format, expecting 'key=value' format" unless t =~ /\S=\S/
144
+ abort "Invalid tags format, expecting 'key=value' format" unless t =~ /\S=\S/
145
145
  end
146
146
  tags.each do |tag|
147
147
  parsed_tag = Hash.new
@@ -159,7 +159,7 @@ module Awscli
159
159
  opts.merge!('VPCZoneIdentifier' => vpc_zone_identifiers.join(','))
160
160
  end
161
161
  begin
162
- @@conn.create_auto_scaling_group(
162
+ @conn.create_auto_scaling_group(
163
163
  options[:id],
164
164
  options[:availability_zones],
165
165
  options[:launch_configuration_name],
@@ -175,64 +175,64 @@ module Awscli
175
175
  end
176
176
  end
177
177
 
178
- def set_desired_capacity options
178
+ def set_desired_capacity(options)
179
179
  # => Sets the desired capacity of the auto scaling group
180
- asg = @@conn.groups.get(options[:id])
180
+ asg = @conn.groups.get(options[:id])
181
181
  abort "Cannot find Auto Scaling Group with name: #{options[:id]}" unless asg
182
182
  min_size = asg.min_size
183
183
  max_size = asg.max_size
184
184
  abort "Desired capacity should fall in between auto scaling groups min-size: #{min_size} and max-size: #{max_size}" unless options[:desired_capacity].between?(min_size, max_size)
185
185
  abort "Desired capacity is already #{asg.desired_capacity}" if options[:desired_capacity] == asg.desired_capacity
186
- @@conn.set_desired_capacity(options[:id], options[:desired_capacity])
186
+ @conn.set_desired_capacity(options[:id], options[:desired_capacity])
187
187
  puts "Scaled Auto Scaling Group: #{options[:id]} to a desired_capacity of #{options[:desired_capacity]}"
188
188
  end
189
189
 
190
190
  # def update
191
- # asg = @@conn.groups.get(options[:id])
191
+ # asg = @conn.groups.get(options[:id])
192
192
  # abort "Cannot find Auto Scaling Group with name: #{options[:id]}" unless asg
193
193
  # opts = Marshal.load(Marshal.dump(options))
194
194
  # opts.reject! { |k| k == 'id' }
195
195
  # asg.update(opts)
196
196
  # end
197
197
 
198
- def suspend_processes options
198
+ def suspend_processes(options)
199
199
  if options[:scaling_processes]
200
- @@conn.suspend_processes(
200
+ @conn.suspend_processes(
201
201
  options[:id],
202
202
  'ScalingProcesses' => options[:scaling_processes])
203
203
  puts "Suspending processes #{options[:scaling_processes]} for group: #{options[:id]}"
204
204
  else
205
- @@conn.suspend_processes(options[:id])
205
+ @conn.suspend_processes(options[:id])
206
206
  puts "Suspending processes for group: #{options[:id]}"
207
207
  end
208
208
  end
209
209
 
210
- def resume_processes options
210
+ def resume_processes(options)
211
211
  if options[:scaling_processes]
212
- @@conn.resume_processes(
212
+ @conn.resume_processes(
213
213
  options[:id],
214
214
  'ScalingProcesses' => options[:scaling_processes]
215
215
  )
216
216
  puts "Resuming processes #{options[:scaling_processes]} for group: #{options[:id]}"
217
217
  else
218
- @@conn.resume_processes(options[:id])
218
+ @conn.resume_processes(options[:id])
219
219
  puts "Resuming processes for group: #{options[:id]}"
220
220
  end
221
221
  end
222
222
 
223
- def delete options
223
+ def delete(options)
224
224
  begin
225
225
  if options[:force]
226
- @@conn.delete_auto_scaling_group(
226
+ @conn.delete_auto_scaling_group(
227
227
  options[:id],
228
228
  'ForceDelete' => options[:force]
229
229
  )
230
230
  else
231
- @@conn.delete_auto_scaling_group(options[:id])
231
+ @conn.delete_auto_scaling_group(options[:id])
232
232
  end
233
233
  rescue Fog::AWS::AutoScaling::ResourceInUse
234
- puts "You cannot delete an AutoScalingGroup while there are instances or pending Spot instance request(s) still in the group"
235
- puts "Use -f option to force delete instances attached to the sacling group"
234
+ puts 'You cannot delete an AutoScalingGroup while there are instances or pending Spot instance request(s) still in the group'
235
+ puts 'Use -f option to force delete instances attached to the scaling group'
236
236
  exit 1
237
237
  end
238
238
  puts "Deleted Auto scaling group #{options[:id]}"
@@ -240,19 +240,19 @@ module Awscli
240
240
  end
241
241
 
242
242
  class Instances
243
- def initialize connection, options = {}
244
- @@conn = connection
243
+ def initialize(connection)
244
+ @conn = connection
245
245
  end
246
246
 
247
247
  def list
248
- @@conn.instances.table
248
+ @conn.instances.table
249
249
  end
250
250
 
251
- def terminate instance_id, decrement_capacity
252
- instance = @@conn.instances.get(instance_id)
251
+ def terminate(instance_id, decrement_capacity)
252
+ instance = @conn.instances.get(instance_id)
253
253
  abort "Cannot find instance with id: #{instance_id}" unless instance
254
254
  begin
255
- @@conn.terminate_instance_in_auto_scaling_group(instance_id, decrement_capacity)
255
+ @conn.terminate_instance_in_auto_scaling_group(instance_id, decrement_capacity)
256
256
  puts "Terminated Instance with id: #{instance_id}"
257
257
  puts "Decrement Capacity of the scaling group: #{instance.auto_scaling_group_name} by 1" if decrement_capacity
258
258
  rescue Fog::AWS::AutoScaling::ValidationError
@@ -262,22 +262,22 @@ module Awscli
262
262
  end
263
263
 
264
264
  class Policies
265
- def initialize connection, options = {}
266
- @@conn = connection
265
+ def initialize(connection)
266
+ @conn = connection
267
267
  end
268
268
 
269
269
  def list
270
- @@conn.policies.table
270
+ @conn.policies.table
271
271
  end
272
272
 
273
- def create options
274
- @@conn.policies.create(options)
275
- puts "Created auto sacling policy: #{options[:id]}, for auto scaling group: #{options[:auto_scaling_group_name]}"
273
+ def create(options)
274
+ @conn.policies.create(options)
275
+ puts "Created auto scaling policy: #{options[:id]}, for auto scaling group: #{options[:auto_scaling_group_name]}"
276
276
  end
277
277
 
278
- def destroy name, group_name
278
+ def destroy(name, group_name)
279
279
  begin
280
- @@conn.delete_policy(group_name, name)
280
+ @conn.delete_policy(group_name, name)
281
281
  puts "Deleted auto scaling policy: #{name}"
282
282
  rescue Fog::AWS::AutoScaling::ValidationError
283
283
  puts "Validation Error: #{$!}"
@@ -8,7 +8,7 @@ module AwsCli
8
8
  # :desc => 'Configuration file, accepts ENV $AWSCLI_CONFIG_FILE',
9
9
  # :default => ENV['AWSCLI_CONFIG_FILE'] || "~/.awscli.yml"
10
10
 
11
- desc "help", "help banner"
11
+ desc 'help', 'help banner'
12
12
  def help_banner
13
13
  puts <<-HELP.gsub(/^ {8}/, '')
14
14
  Amazon Web Services Command Line Interface, Version - #{Awscli::VERSION}
@@ -0,0 +1,49 @@
1
+ #
2
+ # => AWSCLI EMR CREATE Usage Examples
3
+ #
4
+
5
+ # => Create Hive cluster
6
+ awscli emr create --name=awsclijobflow --log-uri=s3n://yourbucket/emrlogs -k ruby-sample --instance-count=3 \
7
+ --master-instance-type=m1.small --slave-instance-type=m1.small --alive --hive-interactive
8
+
9
+ # => Create Custom Jar Job
10
+ awscli emr create --name=awscliemrtest --log-uri=s3n://yourbucket/emrlogs --instance-ec2-key-name=keyname.pem\
11
+ --instance-count=3 --alive --master-instance-type=m1.small --slave-instance-type=m1.small \
12
+ --custom-jar-steps="s3n://yourbucket/worcount/wc.jar",wc,wordcounttest,TERMINATE_JOB_FLOW,"-input","s3n://yourbucket/wordcount/input","-output","s3n://yourbucket/wordcount/output"
13
+
14
+ # => Create Pig Interactive
15
+ awscli emr create --name=awscliemrtest --log-uri=s3n://yourbucket/emrlogs --instance-ec2-key-name=keyname.pem\
16
+ --instance-count=3 --alive --master-instance-type=m1.small --slave-instance-type=m1.small --pig-interactive
17
+
18
+ # => Hive Job
19
+ awscli emr create --name=awscliemrtest --log-uri=s3n://yourbucket/emrlogs --instance-ec2-key-name=keyname.pem\
20
+ --instance-count=3 --alive --master-instance-type=m1.small --slave-instance-type=m1.small \
21
+ --hive-steps=s3n://yourbucket/hive/script.q,s3n://yourbucket/hive/input,s3n://yourbucket/hive/output,-d,'LIBS=s3n://elasticmapreduce/samples/hive-ads/libs'
22
+
23
+ # => Pig Job
24
+ awscli emr create --name=awsclihivesteps --log-uri=s3://yourbucket/emrlogs --instance-ec2-key-name=keyname.prem\
25
+ --instance-count=2 --alive --master-instance-type=m1.small --slave-instance-type=m1.small \
26
+ --pig-steps="s3n://elasticmapreduce/samples/pig-apache/do-reports2.pig","s3n://elasticmapreduce/samples/pig-apache/input","s3n://ashrithtst/pigsteps/output2"
27
+
28
+ # => Adding Instance Groups
29
+ awscli emr create --name=awscliemrtest --log-uri=s3n://yourbucket/emrlogs --instance-ec2-key-name=keyname.pem\
30
+ --instance-groups=1,MASTER,m1.small 2,CORE,m1.small 4,TASK,m1.small
31
+
32
+ # => Create HBase cluster
33
+ awscli emr create --name="hbase install" --log-uri="s3n://yourbucket/emrlogs" --instance-ec2-key-name="keyname.pem" \
34
+ --hadoop-version=0.20.205 --instance-count=3 --hbase-install --master-instance-type=m1.large --slave-instance-type=m1.large
35
+
36
+ # => Create HBase cluster from previous backup
37
+ awscli emr create --name="hbase restored" --log-uri="s3n://yourbucket/emrlogs" --instance-ec2-key-name="keyname.pem" \
38
+ --hadoop-version=0.20.205 --instance-count=3 --master-instance-type=m1.large --slave-instance-type=m1.large --hbase-install \
39
+ --hbase-backup-restore="s3://yourbucket/backups/<jobid>","20120809T031314Z"
40
+
41
+ # => Create HBase cluster with incremental backups
42
+ awscli emr create --name="hbase install" --log-uri="s3n://yourbucket/emrlogs" --instance-ec2-key-name="keyname.pem" \
43
+ --hadoop-version=0.20.205 --instance-count=3 --hbase-install --master-instance-type=m1.large --slave-instance-type=m1.large \
44
+ --hbase-backup-schedule=7,days,"s3://yourbucket/backups/hbase","2012-06-15T20:00Z"
45
+
46
+ # => Create HBase cluster with incremental and consistent backups
47
+ awscli emr create --name="hbase install" --log-uri="s3n://yourbucket/emrlogs" --instance-ec2-key-name="keyname.pem" \
48
+ --hadoop-version=0.20.205 --instance-count=3 --hbase-install --master-instance-type=m1.large --slave-instance-type=m1.large \
49
+ --hbase-backup-schedule=7,days,"s3://yourbucket/backups/hbase","2012-06-15T20:00Z" --hbase-consistent-backup
@@ -0,0 +1,157 @@
1
+ module AwsCli
2
+ module CLI
3
+ require 'awscli/cli'
4
+ require 'awscli/connection'
5
+ require 'awscli/emr'
6
+ class Emr < Thor
7
+ class_option :region, :type => :string, :desc => "region to connect to", :default => 'us-west-1'
8
+
9
+ desc 'usage', 'show the usage examples'
10
+ def usage
11
+ File.open(File.dirname(__FILE__) + '/UsageExamples/emr') do |file|
12
+ puts file.read
13
+ end
14
+ end
15
+
16
+ desc 'list [OPTIONS]', 'returns a yaml dump of job flows that match all of the supplied parameters'
17
+ method_option :job_flow_ids, :aliases => '-j', :type => :array, :desc => 'Return only job flows whose job flow ID is contained in this list'
18
+ method_option :job_flow_status, :aliases => '-s' ,:type => :array, :desc => 'Return only job flows whose state is contained in this list, Valid Values: RUNNING | WAITING | SHUTTING_DOWN | STARTING'
19
+ method_option :table, :aliases => '-t', :type => :boolean, :default => false, :desc => 'Prints out table format'
20
+ def list
21
+ if options[:job_flow_status]
22
+ abort 'Invalid job flow status' unless %w(RUNNING WAITING SHUTTING_DOWN STARTING).each_cons(options[:job_flow_status].size).include? options[:job_flow_status]
23
+ end
24
+ create_emr_object
25
+ @emr.list options
26
+ end
27
+
28
+ desc 'delete', 'shuts a list of job flows down'
29
+ method_option :job_flow_ids, :aliases => '-j', :type => :array, :required => true, :desc => 'list of strings that uniquely identify the job flows to delete'
30
+ def delete
31
+ create_emr_object
32
+ @emr.delete options[:job_flow_ids]
33
+ end
34
+
35
+ desc 'add_instances [OPTIONS]', 'adds an instance group to a running cluster'
36
+ long_desc <<-DESC
37
+ USAGE:
38
+
39
+ awscli emr add_instances -j j-31HK0PWNQ2JKH -c 2 -r TASK -t m1.small -n computegroup -b 0.2
40
+ DESC
41
+ method_option :job_flow_id, :aliases => '-j', :banner => 'ID', :desc => 'Job flow in which to add the instance groups'
42
+ method_option :bid_price, :aliases => '-b', :desc => 'Bid price for each Amazon EC2 instance in the instance group when launching nodes as Spot Instances'
43
+ method_option :instance_count, :aliases => '-c', :banner => 'COUNT', :type => :numeric, :desc => 'Target number of instances for the instance group'
44
+ method_option :instance_role, :aliases => '-r', :banner => 'ROLE', :desc => 'The role of the instance group in the cluster, Valid values: MASTER | CORE | TASK'
45
+ method_option :instance_type, :aliases => '-t', :banner => 'TYPE', :desc => 'The Amazon EC2 instance type for all instances in the instance group'
46
+ method_option :name, :aliases => '-n', :desc => 'Friendly name given to the instance group'
47
+ def add_instances
48
+ if !options[:job_flow_id] and !options[:instance_count] and !options[:instance_role] and !options[:instance_type]
49
+ puts 'These options are required --job-flow-id, --instance-count, --instance-role and --instance-type'
50
+ exit
51
+ end
52
+ abort 'Invalid Instance Role' unless %w(MASTER CORE TASK).include?(options[:instance_role])
53
+ create_emr_object
54
+ @emr.add_instance_group options
55
+ end
56
+
57
+ desc 'modify_instances [OPTIONS]', 'modifies the number of nodes and configuration settings of an instance group'
58
+ method_option :instance_count, :aliases => '-c', :banner => 'COUNT', :desc => 'Target size for instance group'
59
+ method_option :instance_group_id, :aliases => '-g', :banner => 'ID', :desc => 'Unique ID of the instance group to expand or shrink'
60
+ def modify_instances
61
+ if !options[:instance_count] and !options[:instance_group_id]
62
+ puts 'These options are required --instance-count and --instance-group-id'
63
+ exit
64
+ end
65
+ create_emr_object
66
+ @emr.modify_instance_group options
67
+ end
68
+
69
+ desc 'termination_protection [OPTIONS]', 'locks a job flow so the Amazon EC2 instances in the cluster cannot be terminated by user intervention'
70
+ method_option :job_flow_ids, :aliases => '-j', :banner => 'ID(S)', :required => true, :type => :array, :desc => 'list of strings that uniquely identify the job flows to protect'
71
+ method_option :termination_protection, :aliases => '-t', :type => :boolean, :default => false, :desc => 'indicates whether to protect the job flow, if set termination protection is enabled if left alone termination protection is turned off'
72
+ def termination_protection
73
+ create_emr_object
74
+ @emr.set_termination_protection options[:job_flow_ids], options[:termination_protection]
75
+ end
76
+
77
+ desc 'create [OPTIONS]', 'creates and starts running a new job flow'
78
+ #TODO: update Long Desc
79
+ long_desc <<-DESC
80
+ Creates and starts running a new job flow.
81
+
82
+ The job flow will run the steps specified. Once the job flow completes, the cluster is stopped and the HDFS partition is lost.
83
+ To prevent loss of data, configure the last step of the job flow to store results in Amazon S3. Or pass in '--alive' option to keep the cluster running
84
+ even after the workflow is complete(Note: This requires manual termination of the job flow).
85
+
86
+ For additional protection, you can set --termination-protection to lock the job flow and prevent it from being terminated by API call, user intervention,
87
+ or in the event of job flow error.
88
+
89
+ Using this create command, user can also create hive (--hive-interactive), pig (--pig-interactive), hbase (--hbase-install) interactive clusters.
90
+
91
+ See `awscli emr usage` for examples on how to use this interface
92
+ DESC
93
+ method_option :name, :aliases => '-n', :desc => 'The name of the job flow'
94
+ method_option :log_uri, :desc => 'Specifies the location in Amazon S3 to write the log files of the job flow.' #If a value is not provided, logs are not created
95
+ method_option :instance_ec2_key_name, :aliases => '-k', :desc => 'Specifies the name of the Amazon EC2 key pair that can be used to ssh to the master node as the user called hadoop'
96
+ method_option :instance_ec2_subnet_id, :desc => 'Amazon VPC subnet where you want the job flow to launch'
97
+ method_option :hadoop_version, :default => '1.0.3',:desc => 'Specifies the Hadoop version to install for the job flow'
98
+ method_option :instance_count, :type => :numeric, :desc => 'The number of Amazon EC2 instances used to execute the job flow'
99
+ method_option :alive, :type => :boolean, :default => false, :desc => 'Job flow stays running even though it has executed all its steps'
100
+ method_option :master_instance_type, :desc => 'The EC2 instance type of the master node'
101
+ method_option :slave_instance_type, :desc => 'The EC2 instance type of the slave nodes'
102
+ method_option :termination_protection, :type => :boolean, :default => false, :desc => 'Specifies whether to lock the job flow to prevent the Amazon EC2 instances from being terminated by API call'
103
+ method_option :bootstrap_actions, :aliases => '-b', :type => :array, :desc => 'Add bootstrap action script. Format => "name,bootstrap_action_path,bootstrap_action_args"'
104
+ method_option :instance_groups, :aliases => '-g', :type => :array, :desc => 'Add instance groups. Format => "instance_count,instance_role(MASTER | CORE | TASK),instance_type,name,bid_price" see usage command for examples'
105
+ method_option :custom_jar_steps, :aliases => '-s', :type => :array, :desc => 'Add a step that runs a custom jar. Format=> "jar_path(s3)*,name_of_step*,main_class,action_on_failure(TERMINATE_JOB_FLOW | CANCEL_AND_WAIT | CONTINUE),arg1=agr2=arg3,properties(k=v,k=v)"'
106
+ method_option :hive_interactive, :type => :boolean, :default => false, :desc => 'Add a step that sets up the job flow for an interactive (via SSH) hive session'
107
+ method_option :pig_interactive, :type => :boolean, :default => false, :desc => 'Add a step that sets up the job flow for an interactive (via SSH) pig session'
108
+ method_option :hive_steps, :type => :array, :desc => 'Add a step that runs a Hive script. Format=> script_path(s3)*,input_path(s3),output_path(s3),extra_args(-d,args1,-d,args2,-d,arg3)'
109
+ method_option :pig_steps, :type => :array, :desc => 'Add a step that runs a Pig script. Format=> script_path(s3)*,input_path(s3),output_path(s3),extra_args(-p,args1,-p,args2,-p,arg3)'
110
+ method_option :streaming_steps, :type => :array, :desc => 'Add a step that performs hadoop streaming. Format=> input*,output*,mapper*,reducer*,extra_arg1,extra_arg2'
111
+ method_option :hbase_install, :type => :boolean, :default => false, :desc => 'Install hbase on the cluster'
112
+ method_option :hbase_backup_restore, :desc => 'Specify whether to preload the HBase cluster with data stored in Amazon S3. Format=> path(s3)*,version'
113
+ method_option :hbase_backup_schedule, :desc => 'Specify whether to schedule automatic incremental backups. Format=> frequency*,frequency_unit*(Days|Hours|Mins),path(s3)*,start_time*(now|date)'
114
+ method_option :hbase_consistent_backup, :type => :boolean, :default => false, :desc => 'Perform a consistent backup'
115
+ def create
116
+ if !options[:name]
117
+ puts 'These options are required --name'
118
+ exit
119
+ end
120
+ create_emr_object
121
+ @emr.create_job_flow options
122
+ end
123
+
124
+ desc 'add_ig [OPTIONS]', 'adds an instance group(s) to a running cluster'
125
+ method_option :job_flow_id, :aliases => '-j', :desc => 'Job flow in which to add the instance groups'
126
+ method_option :instance_groups, :type => :array, :aliases => '-g', :desc => 'Add instance groups. Format => "instance_count,instance_role(MASTER | CORE | TASK),instance_type,name,bid_price"'
127
+ def add_ig
128
+ unless options[:job_flow_id] and options[:instance_groups]
129
+ abort "--job-flow-id and --instance-groups are required"
130
+ end
131
+ create_emr_object
132
+ @emr.add_instance_groups options[:job_flow_id], options[:instance_groups]
133
+ end
134
+
135
+ desc 'add_steps [OPTIONS]', 'adds new steps to a running job flow'
136
+ method_option :job_flow_id, :aliases => '-j', :desc => 'A string that uniquely identifies the job flow'
137
+ method_option :steps, :aliases => '-s', :type => :array, :desc => 'Add list of steps to be executed by job flow. Format=> jar_path(s3)*,name_of_step*,main_class,action_on_failure(TERMINATE_JOB_FLOW | CANCEL_AND_WAIT | CONTINUE),arg1=agr2=arg3,properties(k=v,k=v)'
138
+ def add_steps
139
+ unless options[:job_flow_id] and options[:steps]
140
+ abort "--job-flow-id and --steps are required"
141
+ end
142
+ create_emr_object
143
+ @emr.add_steps options[:job_flow_id], options[:steps]
144
+ end
145
+ private
146
+
147
+ def create_emr_object
148
+ puts 'EMR Establishing Connection...'
149
+ $emr_conn = Awscli::Connection.new.request_emr
150
+ puts 'EMR Establishing Connection... OK'
151
+ @emr = Awscli::Emr::EMR.new($emr_conn)
152
+ end
153
+
154
+ AwsCli::Cli.register AwsCli::CLI::Emr, :emr, 'emr [COMMAND]', 'AWS Elastic Map Reduce Interface'
155
+ end
156
+ end
157
+ end