bosh-director 1.2583.0 → 1.2596.0

@@ -1,10 +1,10 @@
 module Bosh::Director
   class InstanceUpdater::NetworkUpdater
-    def initialize(instance, vm_model, agent_client, resource_pool_updater, cloud, logger)
+    def initialize(instance, vm_model, agent_client, vm_updater, cloud, logger)
       @instance = instance
       @vm_model = vm_model
       @agent_client = agent_client
-      @resource_pool_updater = resource_pool_updater
+      @vm_updater = vm_updater
       @cloud = cloud
       @logger = logger
     end
@@ -12,40 +12,38 @@ module Bosh::Director
     def update
       unless @instance.networks_changed?
         @logger.info('Skipping network re-configuration')
-        return
+        return [@vm_model, @agent_client]
       end
 
       network_settings = @instance.network_settings
-      @logger.info("Planning to reconfigure network with settings: #{network_settings}")
 
       strategies = [
         ConfigureNetworksStrategy.new(@agent_client, network_settings, @logger),
         PrepareNetworkChangeStrategy.new(@agent_client, network_settings, @logger),
       ]
 
+      @logger.info("Planning to reconfigure network with settings: #{network_settings}")
       selected_strategy = strategies.find { |s| s.before_configure_networks }
 
       @cloud.configure_networks(@vm_model.cid, network_settings)
 
       selected_strategy.after_configure_networks
 
-    rescue Bosh::Clouds::NotSupported => e
-      # If configure_networks can't configure the network as
-      # requested, e.g. when the security groups change on AWS,
-      # configure_networks() will raise an exception and we'll
-      # recreate the VM to work around it
-      @logger.info("configure_networks CPI call failed with error: #{e.inspect}")
-      configure_new_vm
-    end
+      [@vm_model, @agent_client]
 
-    private
+    rescue Bosh::Clouds::NotSupported => e
+      @logger.info("Failed reconfiguring existing VM: #{e.inspect}")
 
-    def configure_new_vm
+      # If configure_networks CPI method cannot reconfigure VM networking
+      # (e.g. when the security groups change on AWS)
+      # it raises Bosh::Clouds::NotSupported to indicate new VM is needed.
       @logger.info('Creating VM with new network configurations')
       @instance.recreate = true
-      @resource_pool_updater.update_resource_pool
+      @vm_updater.update(nil)
     end
 
+    private
+
     # Newer agents support prepare_configure_networks/configure_networks messages
     class ConfigureNetworksStrategy
       def initialize(agent_client, network_settings, logger)
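With this change `InstanceUpdater::NetworkUpdater#update` returns the `[vm_model, agent_client]` pair, and when the CPI rejects an in-place change by raising `Bosh::Clouds::NotSupported` it falls back to recreating the VM through the injected `VmUpdater`. A stand-alone Ruby sketch of that try-in-place-then-recreate pattern (the `Stub*` classes and the `reconfigure_networks` helper are hypothetical, not the director's real collaborators):

```ruby
# Hypothetical stand-ins; the real director passes its cloud (CPI) and an
# InstanceUpdater::VmUpdater instance instead.
class NotSupportedError < StandardError; end

class StubCloud
  def configure_networks(vm_cid, network_settings)
    # e.g. AWS cannot change security groups on a live VM
    raise NotSupportedError, 'cannot reconfigure networks in place'
  end
end

class StubVmUpdater
  def update(new_disk_cid)
    puts "recreating VM (new_disk_cid=#{new_disk_cid.inspect})"
    [:new_vm_model, :new_agent_client]
  end
end

def reconfigure_networks(cloud, vm_updater, vm_cid, network_settings)
  cloud.configure_networks(vm_cid, network_settings)
  [:existing_vm_model, :existing_agent_client]
rescue NotSupportedError => e
  puts "falling back to VM recreation: #{e.message}"
  vm_updater.update(nil)
end

p reconfigure_networks(StubCloud.new, StubVmUpdater.new, 'vm-cid', 'default' => {})
# prints the fallback message, then [:new_vm_model, :new_agent_client]
```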
@@ -0,0 +1,223 @@
+module Bosh::Director
+  class InstanceUpdater::VmUpdater
+    def initialize(instance, vm_model, agent_client, job_renderer, cloud, max_update_tries, logger)
+      @instance = instance
+      @vm_model = vm_model
+      @agent_client = agent_client
+      @job_renderer = job_renderer
+      @cloud = cloud
+      @max_update_tries = max_update_tries
+      @logger = logger
+    end
+
+    def update(new_disk_cid)
+      unless @instance.resource_pool_changed? || new_disk_cid
+        @logger.info('Skipping VM update')
+        return [@vm_model, @agent_client]
+      end
+
+      disk_detacher = DiskDetacher.new(@instance, @vm_model, @agent_client, @cloud, @logger)
+      disk_detacher.detach
+
+      @max_update_tries.times do |try|
+        vm_deleter = VmDeleter.new(@instance, @vm_model, @cloud, @logger)
+        vm_deleter.delete
+
+        vm_creator = VmCreator.new(@instance, @cloud, @logger)
+        @vm_model, @agent_client = vm_creator.create(new_disk_cid)
+
+        begin
+          # Could raise Bosh::Clouds::NoDiskSpace because some CPIs might lazily create disks
+          disk_attacher = DiskAttacher.new(@instance, @vm_model, @agent_client, @cloud, @logger)
+          disk_attacher.attach
+          break
+        rescue Bosh::Clouds::NoDiskSpace => e
+          if e.ok_to_retry && try < @max_update_tries-1
+            @logger.warn("Retrying attach disk operation #{try}: #{e.inspect}")
+          else
+            @logger.warn("Failed to attach disk to new VM: #{e.inspect}")
+            raise CloudNotEnoughDiskSpace,
+              "Not enough disk space to update `#{@instance}'"
+          end
+        end
+      end
+
+      vm_state_applier = VmStateApplier.new(@instance, @vm_model, @agent_client, @job_renderer, @logger)
+      vm_state_applier.apply
+
+      [@vm_model, @agent_client]
+    end
+
+    def detach
+      @logger.info('Detaching VM')
+
+      disk_detacher = DiskDetacher.new(@instance, @vm_model, @agent_client, @cloud, @logger)
+      disk_detacher.detach
+
+      vm_deleter = VmDeleter.new(@instance, @vm_model, @cloud, @logger)
+      vm_deleter.delete
+
+      @instance.job.resource_pool.add_idle_vm
+    end
+
+    def attach_missing_disk
+      if !@instance.model.persistent_disk_cid || @instance.disk_currently_attached?
+        @logger.info('Skipping attaching missing VM')
+        return
+      end
+
+      begin
+        disk_attacher = DiskAttacher.new(@instance, @vm_model, @agent_client, @cloud, @logger)
+        disk_attacher.attach
+      rescue Bosh::Clouds::NoDiskSpace => e
+        @logger.warn("Failed attaching missing disk first time: #{e.inspect}")
+        update(@instance.model.persistent_disk_cid)
+      end
+    end
+
+    private
+
+    class VmCreator
+      def initialize(instance, cloud, logger)
+        @instance = instance
+        @cloud = cloud
+        @logger = logger
+      end
+
+      def create(new_disk_id)
+        @logger.info('Creating VM')
+
+        deployment = @instance.job.deployment
+        resource_pool = @instance.job.resource_pool
+
+        vm_model = Bosh::Director::VmCreator.create(
+          deployment.model,
+          resource_pool.stemcell.model,
+          resource_pool.cloud_properties,
+          @instance.network_settings,
+          [@instance.model.persistent_disk_cid, new_disk_id].compact,
+          resource_pool.env,
+        )
+
+        begin
+          @instance.model.vm = vm_model
+          @instance.model.save
+
+          agent_client = AgentClient.with_defaults(vm_model.agent_id)
+          agent_client.wait_until_ready
+        rescue Exception => e
+          @logger.error("Failed to create/contact VM #{vm_model.cid}: #{e.inspect}")
+          VmDeleter.new(@instance, vm_model, @cloud, @logger).delete
+          raise e
+        end
+
+        [vm_model, agent_client]
+      end
+    end
+
+    class VmDeleter
+      def initialize(instance, vm_model, cloud, logger)
+        @instance = instance
+        @vm_model = vm_model
+        @cloud = cloud
+        @logger = logger
+      end
+
+      def delete
+        @logger.info('Deleting VM')
+
+        @cloud.delete_vm(@vm_model.cid)
+
+        @instance.model.db.transaction do
+          @instance.model.vm = nil
+          @instance.model.save
+
+          @vm_model.destroy
+        end
+      end
+    end
+
+    class DiskAttacher
+      def initialize(instance, vm_model, agent_client, cloud, logger)
+        @instance = instance
+        @vm_model = vm_model
+        @agent_client = agent_client
+        @cloud = cloud
+        @logger = logger
+      end
+
+      def attach
+        if @instance.model.persistent_disk_cid.nil?
+          @logger.info('Skipping disk attaching')
+          return
+        end
+
+        @cloud.attach_disk(@vm_model.cid, @instance.model.persistent_disk_cid)
+
+        @agent_client.mount_disk(@instance.model.persistent_disk_cid)
+      end
+    end
+
+    class DiskDetacher
+      def initialize(instance, vm_model, agent_client, cloud, logger)
+        @instance = instance
+        @vm_model = vm_model
+        @agent_client = agent_client
+        @cloud = cloud
+        @logger = logger
+      end
+
+      def detach
+        unless @instance.disk_currently_attached?
+          @logger.info('Skipping disk detaching')
+          return
+        end
+
+        if @instance.model.persistent_disk_cid.nil?
+          raise AgentUnexpectedDisk,
+            "`#{@instance}' VM has disk attached but it's not reflected in director DB"
+        end
+
+        @agent_client.unmount_disk(@instance.model.persistent_disk_cid)
+
+        @cloud.detach_disk(@vm_model.cid, @instance.model.persistent_disk_cid)
+      end
+    end
+
+    class VmStateApplier
+      def initialize(instance, vm_model, agent_client, job_renderer, logger)
+        @instance = instance
+        @vm_model = vm_model
+        @agent_client = agent_client
+        @job_renderer = job_renderer
+        @logger = logger
+      end
+
+      def apply
+        @logger.info('Applying VM state')
+
+        state = {
+          'deployment' => @instance.job.deployment.name,
+          'networks' => @instance.network_settings,
+          'resource_pool' => @instance.job.resource_pool.spec,
+          'job' => @instance.job.spec,
+          'index' => @instance.index,
+        }
+
+        if @instance.disk_size > 0
+          state['persistent_disk'] = @instance.disk_size
+        end
+
+        @vm_model.update(:apply_spec => state)
+
+        @agent_client.apply(state)
+
+        # Agent will potentially return modified version of state
+        # with resolved dynamic networks information
+        @instance.current_state = @agent_client.get_state
+
+        @job_renderer.render_job_instance(@instance)
+      end
+    end
+  end
+end
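`VmUpdater#update` tears the VM down and recreates it, retrying the whole delete/create/attach cycle up to `max_update_tries` times while the CPI raises `Bosh::Clouds::NoDiskSpace` with `ok_to_retry` set, and failing hard otherwise. A minimal, self-contained sketch of that bounded retry shape (`NoDiskSpaceError` and `with_disk_retries` are assumed names for illustration, not director code):

```ruby
# Stub standing in for Bosh::Clouds::NoDiskSpace, which carries ok_to_retry.
class NoDiskSpaceError < StandardError
  attr_reader :ok_to_retry
  def initialize(ok_to_retry)
    super('no disk space')
    @ok_to_retry = ok_to_retry
  end
end

# Retries the block while the error is retryable and tries remain,
# otherwise raises a final failure.
def with_disk_retries(max_tries)
  max_tries.times do |try|
    begin
      yield try
      return :attached
    rescue NoDiskSpaceError => e
      raise 'Not enough disk space to update instance' unless e.ok_to_retry && try < max_tries - 1
      puts "retrying attach, attempt #{try + 1} failed: #{e.message}"
    end
  end
end

attempts = 0
with_disk_retries(3) do
  attempts += 1
  raise NoDiskSpaceError.new(true) if attempts < 3 # fail twice, then succeed
end
puts "disk attached after #{attempts} attempts"
```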
@@ -4,35 +4,43 @@ require 'bosh/director/core/templates/job_instance_renderer'
 module Bosh::Director
   class JobRenderer
     # @param [DeploymentPlan::Job]
-    def initialize(job)
+    def initialize(job, blobstore)
       @job = job
+      @blobstore = blobstore
+
       job_template_loader = Core::Templates::JobTemplateLoader.new(Config.logger)
       @instance_renderer = Core::Templates::JobInstanceRenderer.new(@job.templates, job_template_loader)
     end
 
-    def render_job_instances(blobstore)
-      @job.instances.each do |instance|
-        rendered_job_instance = @instance_renderer.render(instance.spec)
-
-        configuration_hash = rendered_job_instance.configuration_hash
-
-        archive_model = instance.model.latest_rendered_templates_archive
-        if archive_model && archive_model.content_sha1 == configuration_hash
-          rendered_templates_archive = Core::Templates::RenderedTemplatesArchive.new(archive_model.blobstore_id, archive_model.sha1)
-        else
-          rendered_templates_archive = rendered_job_instance.persist(blobstore)
-          instance.model.add_rendered_templates_archive(
-            blobstore_id: rendered_templates_archive.blobstore_id,
-            sha1: rendered_templates_archive.sha1,
-            content_sha1: configuration_hash,
-            created_at: Time.now,
-          )
-        end
-
-        instance.configuration_hash = configuration_hash
-        instance.template_hashes = rendered_job_instance.template_hashes
-        instance.rendered_templates_archive = rendered_templates_archive
+    def render_job_instances
+      @job.instances.each { |instance| render_job_instance(instance) }
+    end
+
+    def render_job_instance(instance)
+      rendered_job_instance = @instance_renderer.render(instance.spec)
+
+      configuration_hash = rendered_job_instance.configuration_hash
+
+      archive_model = instance.model.latest_rendered_templates_archive
+
+      if archive_model && archive_model.content_sha1 == configuration_hash
+        rendered_templates_archive = Core::Templates::RenderedTemplatesArchive.new(
+          archive_model.blobstore_id,
+          archive_model.sha1,
+        )
+      else
+        rendered_templates_archive = rendered_job_instance.persist(@blobstore)
+        instance.model.add_rendered_templates_archive(
+          blobstore_id: rendered_templates_archive.blobstore_id,
+          sha1: rendered_templates_archive.sha1,
+          content_sha1: configuration_hash,
+          created_at: Time.now,
+        )
       end
+
+      instance.configuration_hash = configuration_hash
+      instance.template_hashes = rendered_job_instance.template_hashes
+      instance.rendered_templates_archive = rendered_templates_archive
     end
   end
 end
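The extracted `render_job_instance` reuses a previously persisted rendered-templates archive when its recorded `content_sha1` matches the freshly computed configuration hash, and persists (and records) a new archive only on a miss. A simplified, self-contained sketch of that content-hash lookup (`ArchiveCache` is a hypothetical stand-in; the real code keys off the instance model and a blobstore client):

```ruby
require 'digest'

class ArchiveCache
  def initialize
    @by_content_sha1 = {}
  end

  # Returns [archive, :reused] on a content-hash hit, [archive, :persisted] on a miss.
  def fetch_or_persist(rendered_content)
    content_sha1 = Digest::SHA1.hexdigest(rendered_content)
    if (existing = @by_content_sha1[content_sha1])
      [existing, :reused]
    else
      archive = { blobstore_id: "blob-#{@by_content_sha1.size + 1}", content_sha1: content_sha1 }
      @by_content_sha1[content_sha1] = archive
      [archive, :persisted]
    end
  end
end

cache = ArchiveCache.new
p cache.fetch_or_persist('rendered templates v1') # :persisted
p cache.fetch_or_persist('rendered templates v1') # :reused (same content hash)
p cache.fetch_or_persist('rendered templates v2') # :persisted
```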
@@ -1,16 +1,19 @@
 module Bosh::Director
   class JobUpdater
-    # @param [Bosh::Director::DeploymentPlan] deployment_plan
-    # @param [DeploymentPlan::Job] job
-    def initialize(deployment_plan, job)
+    # @param [Bosh::Director::DeploymentPlan::Planner] deployment_plan
+    # @param [Bosh::Director::DeploymentPlan::Job] job
+    # @param [Bosh::Director::JobRenderer] job_renderer
+    def initialize(deployment_plan, job, job_renderer)
       @deployment_plan = deployment_plan
       @job = job
+      @job_renderer = job_renderer
+
       @logger = Config.logger
       @event_log = Config.event_log
     end
 
     def update
-      @logger.info("Deleting no longer needed instances")
+      @logger.info('Deleting no longer needed instances')
       delete_unneeded_instances
 
       instances = []
@@ -31,7 +34,7 @@ module Bosh::Director
       @logger.info("Starting canary update num_canaries=#{num_canaries}")
       update_canaries(pool, instances, num_canaries, event_log_stage)
 
-      @logger.info("Waiting for canaries to update")
+      @logger.info('Waiting for canaries to update')
       pool.wait
 
       @logger.info("Finished canary update")
@@ -68,7 +71,7 @@ module Bosh::Director
       event_log_stage.advance_and_track("#{desc} (canary)") do |ticker|
         with_thread_name("canary_update(#{desc})") do
           begin
-            InstanceUpdater.new(instance, ticker).update(:canary => true)
+            InstanceUpdater.new(instance, ticker, @job_renderer).update(:canary => true)
           rescue Exception => e
             @logger.error("Error updating canary instance: #{e.inspect}\n#{e.backtrace.join("\n")}")
             raise
@@ -88,7 +91,7 @@ module Bosh::Director
       event_log_stage.advance_and_track(desc) do |ticker|
         with_thread_name("instance_update(#{desc})") do
           begin
-            InstanceUpdater.new(instance, ticker).update
+            InstanceUpdater.new(instance, ticker, @job_renderer).update
           rescue Exception => e
             @logger.error("Error updating instance: #{e.inspect}\n#{e.backtrace.join("\n")}")
             raise
@@ -0,0 +1,12 @@
+module Bosh::Director
+  class JobUpdaterFactory
+    def initialize(blobstore)
+      @blobstore = blobstore
+    end
+
+    def new_job_updater(deployment_plan, job)
+      job_renderer = JobRenderer.new(job, @blobstore)
+      JobUpdater.new(deployment_plan, job, job_renderer)
+    end
+  end
+end
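`JobUpdaterFactory` keeps the shared blobstore in one place and builds a per-job `JobRenderer`/`JobUpdater` pair, so callers only supply the deployment plan and the job. A hypothetical usage sketch with stub collaborators (the real factory builds `Bosh::Director::JobRenderer` and `Bosh::Director::JobUpdater`):

```ruby
# Stub collaborators used only for this sketch.
JobRenderer = Struct.new(:job, :blobstore)
JobUpdater  = Struct.new(:deployment_plan, :job, :job_renderer)

class JobUpdaterFactory
  def initialize(blobstore)
    @blobstore = blobstore
  end

  def new_job_updater(deployment_plan, job)
    JobUpdater.new(deployment_plan, job, JobRenderer.new(job, @blobstore))
  end
end

factory = JobUpdaterFactory.new(:blobstore_client)
p factory.new_job_updater(:plan, :web_job)
p factory.new_job_updater(:plan, :worker_job) # each job gets its own renderer, same blobstore
```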
@@ -12,6 +12,8 @@ module Bosh::Director
       # @param [String] manifest_file Path to deployment manifest
       # @param [Hash] options Deployment options
       def initialize(manifest_file, options = {})
+        @blobstore = App.instance.blobstores.blobstore
+
         logger.info('Reading deployment manifest')
         @manifest_file = manifest_file
         @manifest = File.read(@manifest_file)
@@ -47,7 +49,8 @@ module Bosh::Director
 
       def update
         resource_pools = DeploymentPlan::ResourcePools.new(event_log, @resource_pool_updaters)
-        multi_job_updater = DeploymentPlan::BatchMultiJobUpdater.new
+        job_updater_factory = JobUpdaterFactory.new(@blobstore)
+        multi_job_updater = DeploymentPlan::BatchMultiJobUpdater.new(job_updater_factory)
         updater = DeploymentPlan::Updater.new(self, event_log, resource_pools, @assembler, @deployment_plan, multi_job_updater)
         updater.update
       end
@@ -1,6 +1,6 @@
 # Copyright (c) 2009-2012 VMware, Inc.
 
-require 'common/version_number'
+require 'common/version/release_version'
 
 module Bosh::Director
   module Jobs
@@ -106,7 +106,7 @@ module Bosh::Director
         @name = @manifest["name"]
 
         begin
-          @version = Bosh::Common::VersionNumber.parse(@manifest["version"])
+          @version = Bosh::Common::Version::ReleaseVersion.parse(@manifest["version"])
           unless @version == @manifest["version"]
             logger.info("Formatted version '#{@manifest["version"]}' => '#{@version}'")
           end
@@ -170,8 +170,7 @@ module Bosh::Director
        event_log.track("#{@name}/#{@version}") {}
      end
 
-      # Normalizes release manifest, so all names, versions, and checksums
-      # are Strings.
+      # Normalizes release manifest, so all names, versions, and checksums are Strings.
      # @return [void]
      def normalize_manifest
        Bosh::Director.hash_string_vals(@manifest, 'name', 'version')
@@ -219,28 +218,18 @@ module Bosh::Director
        existing_packages = []
 
        @manifest["packages"].each do |package_meta|
-          filter = {:sha1 => package_meta["sha1"]}
-          if package_meta["fingerprint"]
-            filter[:fingerprint] = package_meta["fingerprint"]
-            filter = filter.sql_or
-          end
-
          # Checking whether we might have the same bits somewhere
-          packages = Models::Package.where(filter).all
+          packages = Models::Package.where(fingerprint: package_meta["fingerprint"]).all
 
          if packages.empty?
            new_packages << package_meta
            next
          end
 
-          # We can reuse an existing package as long as it
-          # belongs to the same release and has the same name and version.
          existing_package = packages.find do |package|
            package.release_id == @release_model.id &&
              package.name == package_meta["name"] &&
              package.version == package_meta["version"]
-            # NOT checking dependencies here b/c dependency change would
-            # bump the package version anyway.
          end
 
          if existing_package
@@ -251,6 +240,7 @@ module Bosh::Director
            # of the package blob and create a new db entry for it
            package = packages.first
            package_meta["blobstore_id"] = package.blobstore_id
+            package_meta["sha1"] = package.sha1
            new_packages << package_meta
          end
        end
@@ -284,25 +274,10 @@ module Bosh::Director
      def use_existing_packages(packages)
        return if packages.empty?
 
-        n_packages = packages.size
-        event_log.begin_stage("Processing #{n_packages} existing " +
-                              "package#{n_packages > 1 ? "s" : ""}", 1)
-
-        event_log.track("Verifying checksums") do
-          packages.each do |package, package_meta|
+        single_step_stage("Processing #{packages.size} existing package#{"s" if packages.size > 1}") do
+          packages.each do |package, _|
            package_desc = "#{package.name}/#{package.version}"
-            logger.info("Package `#{package_desc}' already exists, " +
-                        "verifying checksum")
-
-            expected = package.sha1
-            received = package_meta["sha1"]
-
-            if expected != received
-              raise ReleaseExistingPackageHashMismatch,
-                    "`#{package_desc}' checksum mismatch, " +
-                    "expected #{expected} but received #{received}"
-            end
-            logger.info("Package `#{package_desc}' verified")
+            logger.info("Using existing package `#{package_desc}'")
            register_package(package)
          end
        end
@@ -359,8 +334,7 @@ module Bosh::Director
 
      # Finds job template definitions in release manifest and sorts them into
      # two buckets: new and existing job templates, then creates new job
-      # template records in the database and points release version to existing
-      # ones.
+      # template records in the database and points release version to existing ones.
      # @return [void]
      def process_jobs
        logger.info("Checking for new jobs in release")
@@ -369,14 +343,8 @@ module Bosh::Director
        existing_jobs = []
 
        @manifest["jobs"].each do |job_meta|
-          filter = {:sha1 => job_meta["sha1"]}
-          if job_meta["fingerprint"]
-            filter[:fingerprint] = job_meta["fingerprint"]
-            filter = filter.sql_or
-          end
-
          # Checking whether we might have the same bits somewhere
-          jobs = Models::Template.where(filter).all
+          jobs = Models::Template.where(fingerprint: job_meta["fingerprint"]).all
 
          template = jobs.find do |job|
            job.release_id == @release_model.id &&
@@ -454,8 +422,7 @@ module Bosh::Director
          path = File.join(job_dir, "templates", relative_path)
          unless File.file?(path)
            raise JobMissingTemplateFile,
-                  "Missing template file `#{relative_path}' " +
-                  "for job `#{template.name}'"
+                  "Missing template file `#{relative_path}' for job `#{template.name}'"
          end
        end
      end
@@ -507,27 +474,10 @@ module Bosh::Director
      def use_existing_jobs(jobs)
        return if jobs.empty?
 
-        n_jobs = jobs.size
-        event_log.begin_stage("Processing #{n_jobs} existing " +
-                              "job#{n_jobs > 1 ? "s" : ""}", 1)
-
-        event_log.track("Verifying checksums") do
-          jobs.each do |template, job_meta|
+        single_step_stage("Processing #{jobs.size} existing job#{"s" if jobs.size > 1}") do
+          jobs.each do |template, _|
            job_desc = "#{template.name}/#{template.version}"
-
-            logger.info("Job `#{job_desc}' already exists, " +
-                        "verifying checksum")
-
-            expected = template.sha1
-            received = job_meta["sha1"]
-
-            if expected != received
-              raise ReleaseExistingJobHashMismatch,
-                    "`#{job_desc}' checksum mismatch, " +
-                    "expected #{expected} but received #{received}"
-            end
-
-            logger.info("Job `#{job_desc}' verified")
+            logger.info("Using existing job `#{job_desc}'")
            register_template(template)
          end
        end
@@ -545,13 +495,11 @@ module Bosh::Director
      # Returns the next release version (to be used for rebased release)
      # @return [String]
      def next_release_version
-        attrs = {
-          :release_id => @release_model.id
-        }
+        attrs = {:release_id => @release_model.id}
        models = Models::ReleaseVersion.filter(attrs).all
        strings = models.map(&:version)
-        versions = Bosh::Common::VersionNumber.parse_list(strings)
-        Bosh::Director::NextRebaseVersion.new(versions).calculate(@version)
+        list = Bosh::Common::Version::ReleaseVersionList.parse(strings)
+        list.rebase(@version)
      end
 
      # Removes release version model, along with all packages and templates.