vagrant-mos 0.8.53 → 0.8.54

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
- metadata.gz: 19d0897f6817f014641fc53677b9209ee4ead668
- data.tar.gz: c0983c99bd9e8a25058082a77f6c877b52f5b035
+ metadata.gz: ab6993e3c1608d77c180c670d922df230a5d47ec
+ data.tar.gz: 2da4fee620e4c28cc1d4eba2e6650ac1d36dee8d
  SHA512:
- metadata.gz: 562cf8094996ee2f8446671e162350db42e27a4ab94931bf26390f16a622e7ac0921e09a8f2ea42824b148eba617a2c6e7389ac1b9d157cd08d16a333139abfc
- data.tar.gz: 251260cbf24872cd692ac92713f0534e240080f64ae2f6e7773129ee4f2a2bcae309dfa34c263d935dd05aa324b54ee6deff0a3f2b49751560b74783426e0f56
+ metadata.gz: 24293e0504bfcc62dd675ae6adbeface5339e4f1ec30fc208ed80f5b4648635d46f2cd41d2d3153f78cfc9bfeff8e520a715a9f13529f0a098a3edef9a84f5be
+ data.tar.gz: f9d92d0566d8d9daedfbb77ad6668567d06c615d5061e8fa69d1e1ad6194618b7f6442d34e6a507c08f8424a4912b14c827b117dbca42ac1ae8d766daf5eb57b
@@ -15,9 +15,6 @@ module VagrantPlugins
  @app = app
  @logger = Log4r::Logger.new("vagrant_mos::action::connect_mos")
  end
- #li= Client.new('c9b13af321f247a496f925d70ce001b3','7013bacdb1d44e0a851aa8786f742596','https://192.168.2.33:8883')
- #puts li.get_balance
-

  def call(env)
  # Get the region we're going to booting up in
@@ -26,31 +23,10 @@ module VagrantPlugins
  # Get the configs
  region_config = env[:machine].provider_config.get_region_config(region)

- =begin
- # Build the fog config
- fog_config = {
- :provider => :mos,
- :region => region
- }
- if region_config.use_iam_profile
- fog_config[:use_iam_profile] = true
- else
- fog_config[:mos_access_key_id] = region_config.access_key_id
- fog_config[:mos_secret_access_key] = region_config.secret_access_key
- fog_config[:mos_secret_url] = region_config.secret_access_url
- end
-
- fog_config[:endpoint] = region_config.endpoint if region_config.endpoint
- fog_config[:version] = region_config.version if region_config.version
- =end
-
  @logger.info("Connecting to MOS...")
  #env[:mos_compute] = Fog::Compute.new(fog_config)
  env[:mos_compute] = Client.new(region_config.access_key_id, region_config.secret_access_key, region_config.secret_access_url)
- #require "mos-sdk"
- #include MOS

- # env[:mos_elb] = Fog::MOS::ELB.new(fog_config.except(:provider, :endpoint))
  @app.call(env)
  end
  end
@@ -9,7 +9,6 @@ module VagrantPlugins
  end

  def call(env)
- puts env[:machine].state.id
  env[:result] = env[:machine].state.id == "ready"
  @app.call(env)
  end
@@ -22,8 +22,7 @@ module VagrantPlugins

  # Find the machine
  server = (mos.describe_instances([machine.id]))["Instance"]
- #puts server
- #server = mos.servers.get(machine.id)
+
  if server.nil?
  # The machine can't be found
  @logger.info("Machine couldn't be found, assuming it got destroyed.")
@@ -47,16 +46,11 @@ module VagrantPlugins
  @logger.info("SSH host attribute not found #{attr_name}")
  end
  end
- #puts 2

  if !host_value
  host_value = server["ipAddresses"]
  end
- #puts "ssh_host_attribute: #{ssh_host_attribute}"
- #puts "ssh_attrs: #{ssh_attrs}"
- #puts server["ipAddresses"]
- #puts host_value
- #puts 3
+
  return {:host => host_value, :port => 22}
  end
  end
@@ -21,13 +21,7 @@ module VagrantPlugins
  return :not_created if machine.id.nil?

  # Find the machine
- #puts mos
- #puts machine.id
  server = (mos.describe_instances([machine.id]))["Instance"]
- #puts "read_state"
- #puts server
- #puts "finish read_state"
- #server = mos.servers.get(machine.id)
  if server.nil? || [:"deleting"].include?(server["status"])
  # The machine can't be found
  @logger.info("Machine not found or terminated, assuming it got destroyed.")
@@ -35,8 +29,6 @@ module VagrantPlugins
  return :not_created
  end

- # Return the state
- # puts server["status"]
  return server["status"]

  end
@@ -49,11 +49,6 @@ module VagrantPlugins
  env[:ui].warn(I18n.t("vagrant_mos.launch_no_keypair"))
  end

- # If there is a subnet ID then warn the user
- #if subnet_id && !elastic_ip
- # env[:ui].warn(I18n.t("vagrant_mos.launch_vpc_warning"))
- #end
-
  # Launch!
  env[:ui].info(I18n.t("vagrant_mos.launching_instance"))
  env[:ui].info(" -- Type: #{instance_type}")
@@ -65,14 +60,12 @@ module VagrantPlugins
  env[:ui].info(" -- IAM Instance Profile ARN: #{iam_instance_profile_arn}") if iam_instance_profile_arn
  env[:ui].info(" -- IAM Instance Profile Name: #{iam_instance_profile_name}") if iam_instance_profile_name
  env[:ui].info(" -- Private IP: #{private_ip_address}") if private_ip_address
- #env[:ui].info(" -- Elastic IP: #{elastic_ip}") if elastic_ip
  env[:ui].info(" -- User Data: yes") if user_data
  env[:ui].info(" -- Security Groups: #{security_groups.inspect}") if !security_groups.empty?
  env[:ui].info(" -- User Data: #{user_data}") if user_data
  env[:ui].info(" -- Block Device Mapping: #{block_device_mapping}") if block_device_mapping
  env[:ui].info(" -- Terminate On Shutdown: #{terminate_on_shutdown}")
  env[:ui].info(" -- Monitoring: #{monitoring}")
- #env[:ui].info(" -- EBS optimized: #{ebs_optimized}")
  env[:ui].info(" -- Assigning a public IP address in a VPC: #{associate_public_ip}")

  options = {
@@ -100,34 +93,9 @@ module VagrantPlugins

  begin
  # todo
- #puts options
- #puts options['image_id']
- #puts options[:flavor_id]
- #puts options["key_name"]
  server = env[:mos_compute].create_instance(options[:image_id], options[:flavor_id], nil, nil, options[:key_name], datadisk=9, bandwidth=2)
- #server = env[:mos_compute].create_instance("320bbeb9-788f-4e7b-86af-7ea377b6a99e", "C2_M2", nil, nil, nil, datadisk=9, bandwidth=2)
- #server = env[:mos_compute].servers.create(options)
- #puts server
  rescue Exception => e
- raise Errors::FogError, :message
- =begin
- rescue Fog::Compute::MOS::NotFound => e
- # Invalid subnet doesn't have its own error so we catch and
- # check the error message here.
- if e.message =~ /subnet ID/
- raise Errors::FogError,
- :message => "Subnet ID not found: #{subnet_id}"
- end
-
- raise
- rescue Fog::Compute::MOS::Error => e
- raise Errors::FogError, :message => e.message
- rescue Excon::Errors::HTTPStatusError => e
- raise Errors::InternalMosError,
- :error => e.message,
- :response => e.response.body
-
- =end
+ raise Errors::MosError, :message => e.message
  end

  # Immediately save the ID since it is created at this point.
@@ -139,14 +107,19 @@ module VagrantPlugins

  env[:ui].info(I18n.t("vagrant_mos.waiting_for_ready"))
  begin
- retryable(:on => Fog::Errors::TimeoutError, :tries => tries) do
+ retryable(:on => Errors::InstanceReadyTimeout, :tries => tries) do
  # If we're interrupted don't worry about waiting
  next if env[:interrupted]

  # Wait for the server to be ready
  #server.wait_for(2, 5) { ready? }
+ if(server["status"] == "running")
+ break
+ else
+ sleep(2)
+ end
  end
- rescue Fog::Errors::TimeoutError
+ rescue Errors::InstanceReadyTimeout
  # Delete the instance
  terminate(env)

@@ -158,12 +131,6 @@ module VagrantPlugins

  @logger.info("Time to instance ready: #{env[:metrics]["instance_ready_time"]}")

- # Allocate and associate an elastic IP if requested
- #if elastic_ip
- # domain = subnet_id ? 'vpc' : 'standard'
- # do_elastic_ip(env, domain, server, elastic_ip)
- #end
-
  if !env[:interrupted]
  env[:metrics]["instance_ssh_time"] = Util::Timer.time do
  # Wait for SSH to be ready.
@@ -210,72 +177,6 @@ module VagrantPlugins
  !rules.select { |r| (r["fromPort"]..r["toPort"]).include?(port) }.empty?
  end

- =begin
- def do_elastic_ip(env, domain, server, elastic_ip)
- if elastic_ip =~ /\d+\.\d+\.\d+\.\d+/
- begin
- address = env[:mos_compute].addresses.get(elastic_ip)
- rescue
- handle_elastic_ip_error(env, "Could not retrieve Elastic IP: #{elastic_ip}")
- end
- if address.nil?
- handle_elastic_ip_error(env, "Elastic IP not available: #{elastic_ip}")
- end
- @logger.debug("Public IP #{address.public_ip}")
- else
- begin
- allocation = env[:mos_compute].allocate_address(domain)
- rescue
- handle_elastic_ip_error(env, "Could not allocate Elastic IP.")
- end
- @logger.debug("Public IP #{allocation.body['publicIp']}")
- end
-
- # Associate the address and save the metadata to a hash
- h = nil
- if domain == 'vpc'
- # VPC requires an allocation ID to assign an IP
- if address
- association = env[:mos_compute].associate_address(server.id, nil, nil, address.allocation_id)
- else
- association = env[:mos_compute].associate_address(server.id, nil, nil, allocation.body['allocationId'])
- # Only store release data for an allocated address
- h = {:allocation_id => allocation.body['allocationId'], :association_id => association.body['associationId'], :public_ip => allocation.body['publicIp']}
- end
- else
- # Standard EC2 instances only need the allocated IP address
- if address
- association = env[:mos_compute].associate_address(server.id, address.public_ip)
- else
- association = env[:mos_compute].associate_address(server.id, allocation.body['publicIp'])
- h = {:public_ip => allocation.body['publicIp']}
- end
- end
-
- unless association.body['return']
- @logger.debug("Could not associate Elastic IP.")
- terminate(env)
- raise Errors::FogError,
- :message => "Could not allocate Elastic IP."
- end
-
- # Save this IP to the data dir so it can be released when the instance is destroyed
- if h
- ip_file = env[:machine].data_dir.join('elastic_ip')
- ip_file.open('w+') do |f|
- f.write(h.to_json)
- end
- end
- end
-
- def handle_elastic_ip_error(env, message)
- @logger.debug(message)
- terminate(env)
- raise Errors::FogError,
- :message => message
- end
- =end
-
  def terminate(env)
  destroy_env = env.dup
  destroy_env.delete(:interrupted)
@@ -11,13 +11,10 @@ module VagrantPlugins
  end

  def call(env)
- #server = env[:mos_compute].servers.get(env[:machine].id)
- #server = (env[:mos_compute].describe_instances([env[:machine].id]))["Instance"]
  if env[:machine].state.id == "ready"
  env[:ui].info(I18n.t("vagrant_mos.already stopped"))
  else
  env[:ui].info(I18n.t("vagrant_mos.stopping"))
- #server.stop(!!env[:force_halt])
  env[:mos_compute].stop_instance(env[:machine].id)
  end

@@ -12,40 +12,13 @@ module VagrantPlugins
  end

  def call(env)
- #server = env[:mos_compute].describe_instances(env[:machine].id)
- #region = env[:machine].provider_config.region
- #region_config = env[:machine].provider_config.get_region_config(region)
-
- #elastic_ip = region_config.elastic_ip
-
- # Release the elastic IP
- #ip_file = env[:machine].data_dir.join('elastic_ip')
- #if ip_file.file?
- # release_address(env,ip_file.read)
- # ip_file.delete
- #end
-
  # Destroy the server and remove the tracking ID
  env[:ui].info(I18n.t("vagrant_mos.terminating"))
- #server.destroy
  env[:mos_compute].terminate_instance(env[:machine].id)
  env[:machine].id = nil

  @app.call(env)
  end
-
- # Release an elastic IP address
- def release_address(env,eip)
- h = JSON.parse(eip)
- # Use association_id and allocation_id for VPC, use public IP for EC2
- if h['association_id']
- env[:mos_compute].disassociate_address(nil,h['association_id'])
- env[:mos_compute].release_address(h['allocation_id'])
- else
- env[:mos_compute].disassociate_address(h['public_ip'])
- env[:mos_compute].release_address(h['public_ip'])
- end
- end
  end
  end
  end
@@ -1,5 +1,5 @@
  module VagrantPlugins
  module MOS
- VERSION = '0.8.53'
+ VERSION = '0.8.54'
  end
  end
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: vagrant-mos
  version: !ruby/object:Gem::Version
- version: 0.8.53
+ version: 0.8.54
  platform: ruby
  authors:
  - yangcs2009
@@ -118,7 +118,6 @@ files:
  - lib/vagrant-mos/action/message_already_created.rb
  - lib/vagrant-mos/action/message_not_created.rb
  - lib/vagrant-mos/action/message_will_not_destroy.rb
- - lib/vagrant-mos/action/package_instance.rb
  - lib/vagrant-mos/action/read_ssh_info.rb
  - lib/vagrant-mos/action/read_state.rb
  - lib/vagrant-mos/action/run_instance.rb
@@ -1,193 +0,0 @@
1
- require "log4r"
2
- require 'vagrant/util/template_renderer'
3
- require 'vagrant-mos/util/timer'
4
- require 'vagrant/action/general/package'
5
-
6
- module VagrantPlugins
7
- module MOS
8
- module Action
9
- # This action packages a running mos-based server into an
10
- # mos-based vagrant box. It does so by burning the associated
11
- # vagrant-mos server instance, into an AMI via fog. Upon
12
- # successful AMI burning, the action will create a .box tarball
13
- # writing a Vagrantfile with the fresh AMI id into it.
14
-
15
- # Vagrant itself comes with a general package action, which
16
- # this plugin action does call. The general action provides
17
- # the actual packaging as well as other options such as
18
- # --include for including additional files and --vagrantfile
19
- # which is pretty much not useful here anyway.
20
-
21
- # The virtualbox package plugin action was loosely used
22
- # as a model for this class.
23
-
24
- class PackageInstance < Vagrant::Action::General::Package
25
- include Vagrant::Util::Retryable
26
-
27
- def initialize(app, env)
28
- @app = app
29
- @logger = Log4r::Logger.new("vagrant_mos::action::package_instance")
30
- env["package.include"] ||= []
31
- env["package.output"] ||= "package.box"
32
- end
33
-
34
- alias_method :general_call, :call
35
- def call(env)
36
- # Initialize metrics if they haven't been
37
- env[:metrics] ||= {}
38
-
39
- # This block attempts to burn the server instance into an AMI
40
- begin
41
- # Get the Fog server object for given machine
42
- #server = env[:mos_compute].servers.get(env[:machine].id)
43
- server = (env[:mos_compute].describe_instances([env[:machine].id]))["Instance"]
44
-
45
- env[:ui].info(I18n.t("vagrant_mos.packaging_instance", :instance_id => server.id))
46
-
47
- # Make the request to MOS to create an AMI from machine's instance
48
- ami_response = server.service.create_image server.id, "#{server.tags["Name"]} Package - #{Time.now.strftime("%Y%m%d-%H%M%S")}", ""
49
-
50
- # Find ami id
51
- @ami_id = ami_response.data[:body]["imageId"]
52
-
53
- # Attempt to burn the mos instance into an AMI within timeout
54
- env[:metrics]["instance_ready_time"] = Util::Timer.time do
55
-
56
- # Get the config, to set the ami burn timeout
57
- region = env[:machine].provider_config.region
58
- region_config = env[:machine].provider_config.get_region_config(region)
59
- tries = region_config.instance_package_timeout / 2
60
-
61
- env[:ui].info(I18n.t("vagrant_mos.burning_ami", :ami_id => @ami_id))
62
-
63
- # Check the status of the AMI every 2 seconds until the ami burn timeout has been reached
64
- begin
65
- retryable(:on => Fog::Errors::TimeoutError, :tries => tries) do
66
- # If we're interrupted don't worry about waiting
67
- next if env[:interrupted]
68
-
69
- # Need to update the ami_obj on each cycle
70
- ami_obj = server.service.images.get(@ami_id)
71
-
72
- # Wait for the server to be ready, raise error if timeout reached
73
- server.wait_for(2) {
74
- if ami_obj.state == "failed"
75
- raise Errors::InstancePackageError,
76
- ami_id: ami_obj.id,
77
- err: ami_obj.state
78
- return
79
- else
80
- # Successful AMI burn will result in true here
81
- ami_obj.ready?
82
- end
83
- }
84
- end
85
- rescue Fog::Errors::TimeoutError
86
- # Notify the user upon timeout
87
- raise Errors::InstancePackageTimeout,
88
- timeout: region_config.instance_package_timeout
89
- end
90
- end
91
- env[:ui].info(I18n.t("vagrant_mos.packaging_instance_complete", :time_seconds => env[:metrics]["instance_ready_time"].to_i))
92
- rescue Fog::Compute::MOS::Error => e
93
- raise Errors::FogError, :message => e.message
94
- end
95
-
96
- # Handles inclusions from --include and --vagrantfile options
97
- setup_package_files(env)
98
-
99
- # Setup the temporary directory for the tarball files
100
- @temp_dir = env[:tmp_path].join(Time.now.to_i.to_s)
101
- env["export.temp_dir"] = @temp_dir
102
- FileUtils.mkpath(env["export.temp_dir"])
103
-
104
- # Create the Vagrantfile and metadata.json files from templates to go in the box
105
- create_vagrantfile(env)
106
- create_metadata_file(env)
107
-
108
- # Just match up a couple environmental variables so that
109
- # the superclass will do the right thing. Then, call the
110
- # superclass to actually create the tarball (.box file)
111
- env["package.directory"] = env["export.temp_dir"]
112
- general_call(env)
113
-
114
- # Always call recover to clean up the temp dir
115
- clean_temp_dir
116
- end
117
-
118
- protected
119
-
120
- # Cleanup temp dir and files
121
- def clean_temp_dir
122
- if @temp_dir && File.exist?(@temp_dir)
123
- FileUtils.rm_rf(@temp_dir)
124
- end
125
- end
126
-
127
- # This method generates the Vagrantfile at the root of the box. Taken from
128
- # VagrantPlugins::ProviderVirtualBox::Action::PackageVagrantfile
129
- def create_vagrantfile env
130
- File.open(File.join(env["export.temp_dir"], "Vagrantfile"), "w") do |f|
131
- f.write(TemplateRenderer.render("vagrant-mos_package_Vagrantfile", {
132
- region: env[:machine].provider_config.region,
133
- ami: @ami_id,
134
- template_root: template_root
135
- }))
136
- end
137
- end
138
-
139
- # This method generates the metadata.json file at the root of the box.
140
- def create_metadata_file env
141
- File.open(File.join(env["export.temp_dir"], "metadata.json"), "w") do |f|
142
- f.write(TemplateRenderer.render("metadata.json", {
143
- template_root: template_root
144
- }))
145
- end
146
- end
147
-
148
- # Sets up --include and --vagrantfile files which may be added as optional
149
- # parameters. Taken from VagrantPlugins::ProviderVirtualBox::Action::SetupPackageFiles
150
- def setup_package_files(env)
151
- files = {}
152
- env["package.include"].each do |file|
153
- source = Pathname.new(file)
154
- dest = nil
155
-
156
- # If the source is relative then we add the file as-is to the include
157
- # directory. Otherwise, we copy only the file into the root of the
158
- # include directory. Kind of strange, but seems to match what people
159
- # expect based on history.
160
- if source.relative?
161
- dest = source
162
- else
163
- dest = source.basename
164
- end
165
-
166
- # Assign the mapping
167
- files[file] = dest
168
- end
169
-
170
- if env["package.vagrantfile"]
171
- # Vagrantfiles are treated special and mapped to a specific file
172
- files[env["package.vagrantfile"]] = "_Vagrantfile"
173
- end
174
-
175
- # Verify the mapping
176
- files.each do |from, _|
177
- raise Vagrant::Errors::PackageIncludeMissing,
178
- file: from if !File.exist?(from)
179
- end
180
-
181
- # Save the mapping
182
- env["package.files"] = files
183
- end
184
-
185
- # Used to find the base location of mos-vagrant templates
186
- def template_root
187
- MOS.source_root.join("templates")
188
- end
189
-
190
- end
191
- end
192
- end
193
- end