googlecloud 0.0.2 → 0.0.4
- data.tar.gz.sig +0 -0
- data/CHANGELOG +4 -0
- data/LICENSE +674 -0
- data/Manifest +111 -0
- data/README.md +4 -3
- data/bin/gcutil +53 -0
- data/googlecloud.gemspec +4 -3
- data/packages/gcutil-1.7.1/CHANGELOG +197 -0
- data/packages/gcutil-1.7.1/LICENSE +202 -0
- data/packages/gcutil-1.7.1/VERSION +1 -0
- data/packages/gcutil-1.7.1/gcutil +53 -0
- data/packages/gcutil-1.7.1/lib/google_api_python_client/LICENSE +23 -0
- data/packages/gcutil-1.7.1/lib/google_api_python_client/apiclient/__init__.py +1 -0
- data/packages/gcutil-1.7.1/lib/google_api_python_client/apiclient/discovery.py +743 -0
- data/packages/gcutil-1.7.1/lib/google_api_python_client/apiclient/errors.py +123 -0
- data/packages/gcutil-1.7.1/lib/google_api_python_client/apiclient/ext/__init__.py +0 -0
- data/packages/gcutil-1.7.1/lib/google_api_python_client/apiclient/http.py +1443 -0
- data/packages/gcutil-1.7.1/lib/google_api_python_client/apiclient/mimeparse.py +172 -0
- data/packages/gcutil-1.7.1/lib/google_api_python_client/apiclient/model.py +385 -0
- data/packages/gcutil-1.7.1/lib/google_api_python_client/apiclient/schema.py +303 -0
- data/packages/gcutil-1.7.1/lib/google_api_python_client/oauth2client/__init__.py +1 -0
- data/packages/gcutil-1.7.1/lib/google_api_python_client/oauth2client/anyjson.py +32 -0
- data/packages/gcutil-1.7.1/lib/google_api_python_client/oauth2client/appengine.py +528 -0
- data/packages/gcutil-1.7.1/lib/google_api_python_client/oauth2client/client.py +1139 -0
- data/packages/gcutil-1.7.1/lib/google_api_python_client/oauth2client/clientsecrets.py +105 -0
- data/packages/gcutil-1.7.1/lib/google_api_python_client/oauth2client/crypt.py +244 -0
- data/packages/gcutil-1.7.1/lib/google_api_python_client/oauth2client/django_orm.py +124 -0
- data/packages/gcutil-1.7.1/lib/google_api_python_client/oauth2client/file.py +107 -0
- data/packages/gcutil-1.7.1/lib/google_api_python_client/oauth2client/locked_file.py +343 -0
- data/packages/gcutil-1.7.1/lib/google_api_python_client/oauth2client/multistore_file.py +379 -0
- data/packages/gcutil-1.7.1/lib/google_api_python_client/oauth2client/tools.py +174 -0
- data/packages/gcutil-1.7.1/lib/google_api_python_client/uritemplate/__init__.py +147 -0
- data/packages/gcutil-1.7.1/lib/google_apputils/LICENSE +202 -0
- data/packages/gcutil-1.7.1/lib/google_apputils/google/__init__.py +3 -0
- data/packages/gcutil-1.7.1/lib/google_apputils/google/apputils/__init__.py +3 -0
- data/packages/gcutil-1.7.1/lib/google_apputils/google/apputils/app.py +356 -0
- data/packages/gcutil-1.7.1/lib/google_apputils/google/apputils/appcommands.py +783 -0
- data/packages/gcutil-1.7.1/lib/google_apputils/google/apputils/basetest.py +1260 -0
- data/packages/gcutil-1.7.1/lib/google_apputils/google/apputils/datelib.py +421 -0
- data/packages/gcutil-1.7.1/lib/google_apputils/google/apputils/debug.py +60 -0
- data/packages/gcutil-1.7.1/lib/google_apputils/google/apputils/file_util.py +181 -0
- data/packages/gcutil-1.7.1/lib/google_apputils/google/apputils/resources.py +67 -0
- data/packages/gcutil-1.7.1/lib/google_apputils/google/apputils/run_script_module.py +217 -0
- data/packages/gcutil-1.7.1/lib/google_apputils/google/apputils/setup_command.py +159 -0
- data/packages/gcutil-1.7.1/lib/google_apputils/google/apputils/shellutil.py +49 -0
- data/packages/gcutil-1.7.1/lib/google_apputils/google/apputils/stopwatch.py +204 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/__init__.py +0 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/auth_helper.py +140 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/auth_helper_test.py +149 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/auto_auth.py +130 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/auto_auth_test.py +75 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/basic_cmds.py +128 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/basic_cmds_test.py +111 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/command_base.py +1808 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/command_base_test.py +1651 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/compute/v1beta13.json +2851 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/compute/v1beta14.json +3361 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/disk_cmds.py +342 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/disk_cmds_test.py +474 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/firewall_cmds.py +344 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/firewall_cmds_test.py +231 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/flags_cache.py +274 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/gcutil +89 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/gcutil_logging.py +69 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/image_cmds.py +262 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/image_cmds_test.py +172 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/instance_cmds.py +1506 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/instance_cmds_test.py +1904 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/kernel_cmds.py +91 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/kernel_cmds_test.py +56 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/machine_type_cmds.py +106 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/machine_type_cmds_test.py +59 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/metadata.py +96 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/metadata_lib.py +357 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/metadata_test.py +84 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/mock_api.py +420 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/mock_metadata.py +58 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/move_cmds.py +824 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/move_cmds_test.py +307 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/network_cmds.py +178 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/network_cmds_test.py +133 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/operation_cmds.py +181 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/operation_cmds_test.py +196 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/path_initializer.py +38 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/project_cmds.py +173 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/project_cmds_test.py +111 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/scopes.py +61 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/scopes_test.py +50 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/snapshot_cmds.py +276 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/snapshot_cmds_test.py +260 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/ssh_keys.py +266 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/ssh_keys_test.py +128 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/table_formatter.py +563 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/thread_pool.py +188 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/thread_pool_test.py +88 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/utils.py +208 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/utils_test.py +193 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/version.py +17 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/version_checker.py +246 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/version_checker_test.py +271 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/zone_cmds.py +151 -0
- data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/zone_cmds_test.py +60 -0
- data/packages/gcutil-1.7.1/lib/httplib2/LICENSE +21 -0
- data/packages/gcutil-1.7.1/lib/httplib2/httplib2/__init__.py +1630 -0
- data/packages/gcutil-1.7.1/lib/httplib2/httplib2/cacerts.txt +714 -0
- data/packages/gcutil-1.7.1/lib/httplib2/httplib2/iri2uri.py +110 -0
- data/packages/gcutil-1.7.1/lib/httplib2/httplib2/socks.py +438 -0
- data/packages/gcutil-1.7.1/lib/iso8601/LICENSE +20 -0
- data/packages/gcutil-1.7.1/lib/iso8601/iso8601/__init__.py +1 -0
- data/packages/gcutil-1.7.1/lib/iso8601/iso8601/iso8601.py +102 -0
- data/packages/gcutil-1.7.1/lib/iso8601/iso8601/test_iso8601.py +111 -0
- data/packages/gcutil-1.7.1/lib/python_gflags/AUTHORS +2 -0
- data/packages/gcutil-1.7.1/lib/python_gflags/LICENSE +28 -0
- data/packages/gcutil-1.7.1/lib/python_gflags/gflags.py +2862 -0
- data/packages/gcutil-1.7.1/lib/python_gflags/gflags2man.py +544 -0
- data/packages/gcutil-1.7.1/lib/python_gflags/gflags_validators.py +187 -0
- metadata +118 -5
- metadata.gz.sig +0 -0
--- /dev/null
+++ data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/mock_metadata.py
@@ -0,0 +1,58 @@
+# Copyright 2012 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Test utilities for mocking out metadata_lib.Metadata."""
+
+
+
+
+class MockMetadata(object):
+  def __init__(self):
+    self._is_present_calls = []
+    self._get_access_token_calls = []
+    self._get_access_scopes_calls = []
+    self._is_present_return_values = []
+    self._get_access_token_return_values = []
+    self._get_access_scopes_return_values = []
+
+  def ExpectIsPresent(self, and_return):
+    self._is_present_return_values.append(and_return)
+
+  def ExpectGetAccessToken(self, and_return):
+    self._get_access_token_return_values.append(and_return)
+
+  def ExpectGetAccessScopes(self, and_return):
+    self._get_access_scopes_return_values.append(and_return)
+
+  def IsPresent(self):
+    self._is_present_calls.append({})
+    return self._is_present_return_values.pop(0)
+
+  def GetAccessToken(self, scopes, service_account='default',
+                     any_available=True):
+    self._get_access_token_calls.append(
+        {'scopes': ' '.join(scopes),
+         'service_account': service_account,
+         'any_available': any_available})
+    return self._get_access_token_return_values.pop(0)
+
+  def GetAccessScopes(self, service_account='default'):
+    self._get_access_scopes_calls.append(
+        {'service_account': service_account})
+    return self._get_access_scopes_return_values.pop(0)
+
+  def ExpectsMoreCalls(self):
+    return sum(map(len, [self._is_present_return_values,
+                         self._get_access_token_return_values,
+                         self._get_access_scopes_return_values])) > 0
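The MockMetadata double above is queue-based: each Expect* call pushes a canned return value that the matching accessor later pops in FIFO order, and ExpectsMoreCalls reports whether any queued values were left unconsumed. A minimal sketch of how a test might drive it (the calls and values below are illustrative only, not taken from the gem's test files):

# Illustrative usage only -- not part of the gem. Queue expectations first,
# then exercise the code under test and verify everything queued was consumed.
metadata = MockMetadata()
metadata.ExpectIsPresent(and_return=True)
metadata.ExpectGetAccessToken(and_return='fake-token')

assert metadata.IsPresent() is True                            # pops the queued True
assert metadata.GetAccessToken(['compute.readonly']) == 'fake-token'
assert not metadata.ExpectsMoreCalls()                         # every queued value was popped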
--- /dev/null
+++ data/packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/move_cmds.py
@@ -0,0 +1,824 @@
+# Copyright 2012 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Commands for moving resources from one zone to another."""
+
+
+
+import collections
+import datetime
+import json
+import os
+import textwrap
+import time
+import uuid
+
+from google.apputils import app
+from google.apputils import appcommands
+import gflags as flags
+
+from gcutil import command_base
+from gcutil import gcutil_logging
+from gcutil import utils
+from gcutil import version
+
+
+LOGGER = gcutil_logging.LOGGER
+
+MAX_INSTANCES_TO_MOVE = 100
+MAX_DISKS_TO_MOVE = 100
+
+
+class MoveInstancesBase(command_base.GoogleComputeCommand):
+  """The base class for the move commands."""
+
+  def __init__(self, name, flag_values):
+    super(MoveInstancesBase, self).__init__(name, flag_values)
+
+    flags.DEFINE_boolean(
+        'force',
+        False,
+        'Override the confirmation prompt.',
+        flag_values=flag_values)
+
+    flags.DEFINE_boolean(
+        'keep_snapshots',
+        False,
+        'Do not delete snapshots that were created for the disks.',
+        flag_values=flag_values)
+
+  def SetApi(self, api):
+    self._disks_api = api.disks()
+    self._instances_api = api.instances()
+    self._machine_type_api = api.machineTypes()
+    self._projects_api = api.projects()
+    self._snapshots_api = api.snapshots()
+    self._zones_api = api.zones()
+
+  def Handle(self, *args, **kwargs):
+    """The point of entry to the command.
+
+    This dispatches the subclass' HandleMove method.
+
+    Raises:
+      UsageError: If the service version is not v1beta14 or higher.
+        The dependency on the version is due to the fact that
+        snapshots were introduced in v1beta14.
+    """
+    if not self._IsUsingAtLeastApiVersion('v1beta14'):
+      raise app.UsageError(
+          'This command requires using API version v1beta14 or higher.')
+    self._project_resource = self._projects_api.get(
+        project=self._project).execute()
+    self.HandleMove(*args, **kwargs)
+    print 'The move completed successfully.'
+
+  def _Confirm(self, instances_to_mv, instances_to_ignore, disks_to_mv,
+               dest_zone):
+    """Displays what is about to happen and prompts the user to proceed.
+
+    Args:
+      instances_to_mv: The instances that will be moved.
+      instances_to_ignore: Instances that will not be moved because they're
+        already in the destination zone.
+      disks_to_mv: A list of the disk names that will be moved.
+      dest_zone: The destination zone.
+
+    Raises:
+      CommandError: If the user declines to proceed.
+    """
+    # Ensures that the parameters make sense.
+    assert instances_to_mv, (
+        'Cannot confirm move if there are no instances to move.')
+    assert not [i for i in instances_to_mv if i['zone'].endswith(dest_zone)], (
+        'Some instances in the move set are already in the destination zone.')
+    assert ([i for i in instances_to_ignore if i['zone'].endswith(dest_zone)] ==
+            instances_to_ignore), (
+                'Not all instances in ignore set are in destination zone.')
+
+    if instances_to_ignore:
+      print ('These instances are already in %s and will not be moved:' %
+             dest_zone)
+      print utils.ListStrings(i['name'] for i in instances_to_ignore)
+
+    print 'The following instances will be moved to %s:' % dest_zone
+    print utils.ListStrings(i['name'] for i in instances_to_mv)
+
+    if disks_to_mv:
+      print 'The following disks will be moved to %s:' % dest_zone
+      print utils.ListStrings(disks_to_mv)
+
+    if not self._flags.force and not utils.Proceed():
+      raise command_base.CommandError('Move aborted.')
+
+  def _DeleteInstances(self, instances, zone):
+    """Deletes the given instances.
+
+    Args:
+      instances: A list of instance resources.
+      zone: The zone to which the instances belong.
+
+    Raises:
+      CommandError: If one or more of the deletions fail.
+    """
+    if not instances:
+      return
+
+    print 'Deleting instances...'
+    requests = []
+    for instance in instances:
+      requests.append(self._instances_api.delete(
+          project=self._project,
+          zone=zone,
+          instance=instance['name']))
+    results, exceptions = self.ExecuteRequests(
+        requests, collection_name='instances')
+    if exceptions:
+      raise command_base.CommandError(
+          'Aborting due to errors while deleting instances:\n%s' %
+          utils.ListStrings(exceptions))
+    self._CheckForErrorsInOps(self.MakeListResult(results, 'operationList'))
+
+  def _CreateInstances(self, instances, src_zone, dest_zone):
+    """Creates the instance resources in the given list in dest_zone.
+
+    The instance resources are changed in two ways:
+      (1) Their zone fields are changed to dest_zone; and
+      (2) Their ephemeral IPs are cleared.
+
+    Args:
+      instances: A list of instance resources.
+      src_zone: The zone to which the instances belong.
+      dest_zone: The destination zone.
+
+    Raises:
+      CommandError: If one or more of the insertions fail.
+    """
+    if not instances:
+      return
+
+    print 'Recreating instances in %s...' % dest_zone
+    ip_addresses = set(self._project_resource.get('externalIpAddresses', []))
+    self._SetIps(instances, ip_addresses)
+
+    requests = []
+    for instance in instances:
+      instance['zone'] = self.NormalizeTopLevelResourceName(
+          self._project, 'zones', dest_zone)
+
+      # Replaces the zones for the persistent disks.
+      for disk in instance['disks']:
+        if 'source' in disk:
+          disk['source'] = disk['source'].replace(
+              'zones/' + src_zone, 'zones/' + dest_zone)
+
+      requests.append(self._instances_api.insert(
+          project=self._project, body=instance, zone=dest_zone))
+    results, exceptions = self.ExecuteRequests(
+        requests, collection_name='instances')
+    if exceptions:
+      raise command_base.CommandError(
+          'Aborting due to errors while creating instances:\n%s' %
+          utils.ListStrings(exceptions))
+    self._CheckForErrorsInOps(self.MakeListResult(results, 'operationList'))
+
+  def _CheckForErrorsInOps(self, results):
+    """Raises CommandError if any operations in results contains an error."""
+    _, ops = self._PartitionResults(results)
+    errors = []
+    for op in (ops or []):
+      if 'error' in op and 'errors' in op['error'] and op['error']['errors']:
+        error = op['error']['errors'][0].get('message')
+        if error:
+          errors.append(error)
+    if errors:
+      raise command_base.CommandError(
+          'Encountered errors:\n%s' % utils.ListStrings(errors))
+
+  def _SetIps(self, instances, ip_addresses):
+    """Clears the natIP field for instances without reserved addresses."""
+    for instance in instances:
+      for interface in instance.get('networkInterfaces', []):
+        for config in interface.get('accessConfigs', []):
+          if 'natIP' in config and config['natIP'] not in ip_addresses:
+            config['natIP'] = None
+
+  def _WaitForSnapshots(self, snapshots):
+    """Waits for the given snapshots to be in the READY state."""
+    snapshots = set(snapshots)
+    start_sec = time.time()
+    while True:
+      if time.time() - start_sec > self._flags.max_wait_time:
+        raise command_base.CommandError(
+            'Timeout reached while waiting for snapshots to be ready.')
+
+      all_snapshots = [
+          s for s in utils.All(self._snapshots_api.list, self._project)['items']
+          if s['name'] in snapshots and s['status'] != 'READY']
+      if not all_snapshots:
+        break
+      LOGGER.info('Waiting for snapshots to be READY. Sleeping for %ss' %
+                  self._flags.sleep_between_polls)
+      time.sleep(self._flags.sleep_between_polls)
+
+  def _CreateSnapshots(self, snapshot_mappings, src_zone, dest_zone):
+    """Creates snapshots for the disks to be moved.
+
+    Args:
+      snapshot_mappings: A map of disk names that should be moved to
+        the names that should be used for each disk's snapshot.
+      src_zone: The source zone. All disks in snapshot_mappings must be
+        in this zone.
+      dest_zone: The zone the disks are destined for.
+    """
+    if not snapshot_mappings:
+      return
+
+    print 'Snapshotting disks...'
+    requests = []
+    for disk_name, snapshot_name in snapshot_mappings.iteritems():
+      snapshot_resource = {
+          'name': snapshot_name,
+          'sourceDisk': self.NormalizePerZoneResourceName(
+              self._project, src_zone, 'disks', disk_name),
+          'description': ('Snapshot for moving disk %s from %s to %s.' %
+                          (disk_name, src_zone, dest_zone))}
+      requests.append(self._snapshots_api.insert(
+          project=self._project, body=snapshot_resource))
+
+    results, exceptions = self.ExecuteRequests(
+        requests, collection_name='snapshots')
+    if exceptions:
+      raise command_base.CommandError(
+          'Aborting due to errors while creating snapshots:\n%s' %
+          utils.ListStrings(exceptions))
+    self._CheckForErrorsInOps(self.MakeListResult(results, 'operationList'))
+    self._WaitForSnapshots(snapshot_mappings.values())
+
+  def _DeleteSnapshots(self, snapshot_names, zone):
+    """Deletes the given snapshots.
+
+    Args:
+      snapshot_names: A list of snapshot names to delete.
+      zone: The zones to which the snapshots belong.
+    """
+    if not snapshot_names or self._flags.keep_snapshots:
+      return
+
+    print 'Deleting snapshots...'
+    requests = []
+    for name in snapshot_names:
+      requests.append(self._snapshots_api.delete(
+          project=self._project, snapshot=name))
+
+    results, exceptions = self.ExecuteRequests(
+        requests, collection_name='snapshots')
+    if exceptions:
+      raise command_base.CommandError(
+          'Aborting due to errors while deleting snapshots:\n%s' %
+          utils.ListStrings(exceptions))
+    self._CheckForErrorsInOps(self.MakeListResult(results, 'operationList'))
+
+  def _CreateDisksFromSnapshots(self, snapshot_mappings, dest_zone):
+    """Creates disks in the destination zone from the given snapshots.
+
+    Args:
+      snapshot_mappings: A dict of disk names to snapshot names. Disks are
+        created in the destination zone from the given snapshot names. The
+        disks will assume their previous names as indicated by the key-value
+        pairs.
+      dest_zone: The zone in which the disks will be created.
+    """
+    if not snapshot_mappings:
+      return
+
+    print 'Recreating disks from snapshots...'
+    requests = []
+    for disk_name, snapshot_name in snapshot_mappings.iteritems():
+      disk_resource = {
+          'name': disk_name,
+          'sourceSnapshot': self.NormalizeGlobalResourceName(
+              self._project, 'snapshots', snapshot_name)}
+      requests.append(self._disks_api.insert(
+          project=self._project, body=disk_resource, zone=dest_zone))
+
+    results, exceptions = self.ExecuteRequests(
+        requests, collection_name='disks')
+    if exceptions:
+      raise command_base.CommandError(
+          'Aborting due to errors while re-creating disks:\n%s' %
+          utils.ListStrings(exceptions))
+    self._CheckForErrorsInOps(self.MakeListResult(results, 'operationList'))
+
+  def _DeleteDisks(self, disk_names, zone):
+    """Deletes the given disks.
+
+    Args:
+      disk_names: A list of disk names to delete.
+      zone: The zone to which the disks belong.
+    """
+    if not disk_names:
+      return
+
+    print 'Deleting disks...'
+    requests = []
+    for name in disk_names:
+      requests.append(self._disks_api.delete(
+          project=self._project, disk=name, zone=zone))
+
+    results, exceptions = self.ExecuteRequests(
+        requests, collection_name='disks')
+    if exceptions:
+      raise command_base.CommandError(
+          'Aborting due to errors while deleting disks:\n%s' %
+          utils.ListStrings(exceptions))
+    self._CheckForErrorsInOps(self.MakeListResult(results, 'operationList'))
+
+  def _CalculateNumCpus(self, instances_to_mv):
+    """Calculates the amount of CPUs used by the given instances."""
+    machines = utils.All(
+        self._machine_type_api.list,
+        self._project)['items']
+    num_cpus = dict((m['selfLink'], m['guestCpus']) for m in machines)
+    return sum(float(num_cpus[i['machineType']]) for i in instances_to_mv)
+
+  def _CalculateTotalDisksSizeGb(self, disk_names, zone):
+    """Calculates the total size of the given disks."""
+    disk_names = set(disk_names)
+    disks = utils.All(
+        self._disks_api.list,
+        self._project,
+        zone=zone)['items']
+    disk_sizes = [float(d['sizeGb']) for d in disks if d['name'] in disk_names]
+    return sum(disk_sizes)
+
+  def _CreateQuotaRequirementsDict(self, instances_to_mv, disks_to_mv,
+                                   src_zone, snapshots_to_create=None):
+    """Generates a mapping between resource type to the quota required."""
+    return {'INSTANCES': len(instances_to_mv),
+            'CPUS': self._CalculateNumCpus(instances_to_mv),
+            'DISKS': len(disks_to_mv),
+            'DISKS_TOTAL_GB': self._CalculateTotalDisksSizeGb(
+                disks_to_mv, src_zone),
+            'SNAPSHOTS': (len(snapshots_to_create)
+                          if snapshots_to_create is not None
+                          else len(disks_to_mv))}
+
+  def _CheckQuotas(self, instances_to_mv, disks_to_mv, src_zone, dest_zone,
+                   snapshots_to_create=None):
+    """Raises a CommandError if the quota to perform the move does not exist."""
+    print 'Checking project and destination zone quotas...'
+
+    dest_zone_resource = self._zones_api.get(
+        project=self._project, zone=dest_zone).execute()
+    requirements = self._CreateQuotaRequirementsDict(
+        instances_to_mv, disks_to_mv, src_zone,
+        snapshots_to_create=snapshots_to_create)
+    available = self._ExtractAvailableQuota(
+        self._project_resource.get('quotas', []),
+        dest_zone_resource.get('quotas', []), requirements)
+
+    LOGGER.debug('Required quota for move is: %s', requirements)
+    LOGGER.debug('Available quota is: %s', available)
+
+    for metric, required in requirements.iteritems():
+      if available.get(metric, 0) - required < 0:
+        raise command_base.CommandError(
+            'You do not have enough quota for %s in %s or your project.' % (
+                metric, dest_zone))
+
+  def _ExtractAvailableQuota(self, project_quota, zone_quota, requirements):
+    """Extracts the required quota from the given project and zone resources.
+
+    Args:
+      project_quota: The list of project quotas that's included in a project
+        resource.
+      zone_quota: The list of zone quotas that's included in a zone resource.
+      requirements: A dict mapping resource type to the amount of required
+        quota.
+
+    Returns:
+      A mapping of available quota for INSTANCES, CPUS, DISKS, DISKS_TOTAL_GB,
+      and SNAPSHOTS. The value can be negative if enough quota does not exist.
+    """
+    pertinent_resources = set(requirements.keys())
+    available = {}
+
+    for quota in project_quota:
+      metric = quota.get('metric')
+      if metric in pertinent_resources:
+        available[metric] = quota.get('limit') - quota.get('usage')
+        # For existing resources that are to be moved (i.e.,
+        # everything in requirements except snapshots since they do
+        # not exist yet) since they do not exist yet) we must count
+        # them into the available number since they will be deleted
+        # shortly.
+        if metric != 'SNAPSHOTS':
+          available[metric] += requirements[metric]
+
+    for quota in zone_quota:
+      metric = quota.get('metric')
+      if metric in pertinent_resources:
+        available[metric] = min(available[metric],
+                                quota.get('limit') - quota.get('usage'))
+
+    return available
+
+
+class MoveInstances(MoveInstancesBase):
+  """Move a set of instances from one zone to another zone.
+
+  This command also moves any persistent disks that are attached to
+  the instances.
+
+  During the move, do not modify your project, as changes to the
+  project may interfere with the move.
+
+  In case of failure, use the gcutil resumemove command to re-attempt
+  the move.
+
+  You can pick which instances to move by specifying a series regular
+  expressions that will be used to match instance names in the source
+  zone. For example, the following command will move all instances in
+  zone-a whose names match the regular expressions i-[0-9] or b-.* to
+  zone-b:
+
+    gcutil moveinstances \
+      --source_zone=zone-a \
+      --destination_zone=zone-b \
+      "i-[0-9]" "b-.*"
+
+  WARNING: Instances that are moved will lose ALL of their ephemeral
+  state (i.e., ephemeral disks, ephemeral IP addresses, and memory).
+  """
+
+  positional_args = '<name-regex-1> ... <name-regex-n>'
+
+  def __init__(self, name, flag_values):
+    """Constructs a new MoveInstances object."""
+    super(MoveInstances, self).__init__(name, flag_values)
+
+    flags.DEFINE_string(
+        'source_zone',
+        None,
+        'The source zone from which instances will be moved.',
+        flag_values=flag_values)
+    flags.DEFINE_string(
+        'destination_zone',
+        None,
+        'The zone to which the instances should be moved.',
+        flag_values=flag_values)
+
+  def _ValidateFlags(self):
+    """Raises a UsageError if there is any problem with the flags."""
+    if not self._flags.source_zone:
+      raise app.UsageError(
+          'You must specify a source zone through the --source_zone flag.')
+    if not self._flags.destination_zone:
+      raise app.UsageError('You must specify a destination zone '
+                           'through the --destination_zone flag.')
+    if self._flags.source_zone == self._flags.destination_zone:
+      raise app.UsageError('The destination and source zones cannot be equal.')
+
+  def HandleMove(self, *instance_regexes):
+    """Handles the actual move.
+
+    Args:
+      *instance_regexes: The sequence of name regular expressions used
+        for filtering.
+    """
+    self._ValidateFlags()
+
+    if not instance_regexes:
+      raise app.UsageError(
+          'You must specify at least one regex for instances to move.')
+
+    self._flags.destination_zone = self.DenormalizeResourceName(
+        self._flags.destination_zone)
+    self._CheckDestinationZone()
+
+    print 'Retrieving instances in %s matching: %s...' % (
+        self._flags.source_zone, ' '.join(instance_regexes))
+    instances_to_mv = utils.All(
+        self._instances_api.list,
+        self._project,
+        filter=utils.RegexesToFilterExpression(instance_regexes),
+        zone=self._flags.source_zone)['items']
+    instances_in_dest = utils.All(
+        self._instances_api.list,
+        self._project,
+        filter=utils.RegexesToFilterExpression(instance_regexes),
+        zone=self._flags.destination_zone)['items']
+
+    self._CheckInstancePreconditions(instances_to_mv, instances_in_dest)
+
+    instances_to_ignore = utils.All(
+        self._instances_api.list,
+        self._project,
+        filter=utils.RegexesToFilterExpression(instance_regexes, op='ne'),
+        zone=self._flags.source_zone)['items']
+
+    print 'Checking disk preconditions...'
+    disks_to_mv = self._GetPersistentDiskNames(instances_to_mv)
+    self._CheckDiskPreconditions(instances_to_ignore, disks_to_mv)
+    # At this point, all disks in use by instances_to_mv are only
+    # attached to instances in the set instances_to_mv.
+
+    # Check the snapshots quota and the quota in the destination zone
+    # to make sure that enough quota exists to support the move.
+    self._CheckQuotas(instances_to_mv, disks_to_mv, self._flags.source_zone,
+                      self._flags.destination_zone)
+
+    self._Confirm(instances_to_mv, [], disks_to_mv,
+                  self._flags.destination_zone)
+
+    log_path = self._GenerateLogPath()
+    snapshot_mappings = self._GenerateSnapshotNames(disks_to_mv)
+    self._WriteLog(log_path, instances_to_mv, snapshot_mappings)
+
+    self._DeleteInstances(instances_to_mv, self._flags.source_zone)
+
+    # Assuming no other processes have modified the user's project, at
+    # this point, we can assume that all disks-to-be-moved are
+    # dormant.
+    self._CreateSnapshots(snapshot_mappings,
+                          self._flags.source_zone,
+                          self._flags.destination_zone)
+    self._DeleteDisks(disks_to_mv, self._flags.source_zone)
+    self._CreateDisksFromSnapshots(snapshot_mappings,
+                                   self._flags.destination_zone)
+    self._CreateInstances(instances_to_mv,
+                          self._flags.source_zone,
+                          self._flags.destination_zone)
+
+    self._DeleteSnapshots(snapshot_mappings.values(),
+                          self._flags.destination_zone)
+
+    # We have succeeded, so it's safe to delete the log file.
+    os.remove(log_path)
+
+  def _GenerateSnapshotNames(self, disk_names):
+    """Returns a dict mapping each disk name to a random UUID.
+
+    The UUID will be used as the disk's snapshot name. UUID's are
+    valid Compute resource names. Further, UUID collisions are
+    improbable, so using them is a great way for generating resource
+    names (e.g., we avoid network communication to check if the name
+    we choose already exists).
+
+    Args:
+      disk_names: A list of disk_names for which snapshot names
+        should be generated.
+
+    Returns:
+      A dict with the mapping.
+    """
+    return dict((name, 'snapshot-' + str(uuid.uuid4())) for name in disk_names)
+
+  def _CheckInstancePreconditions(self, instances_to_mv, instances_in_dest):
+    if not instances_to_mv:
+      raise command_base.CommandError('No matching instances were found.')
+
+    if len(instances_to_mv) > MAX_INSTANCES_TO_MOVE:
+      raise command_base.CommandError(
+          'At most %s instances can be moved at a '
+          'time. Refine your query and try again.' % MAX_INSTANCES_TO_MOVE)
+
+    # Checks for name collisions.
+    src_names = [i['name'] for i in instances_to_mv]
+    dest_names = [i['name'] for i in instances_in_dest]
+    common_names = set(src_names) & set(dest_names)
+    if common_names:
+      raise command_base.CommandError(
+          'Encountered name collisions. Instances with the following names '
+          'exist in both the source and destination zones: \n%s' %
+          utils.ListStrings(common_names))
+
+  def _CheckDiskPreconditions(self, instances_to_ignore, disk_names):
+    if len(disk_names) > MAX_DISKS_TO_MOVE:
+      raise command_base.CommandError(
+          'At most %s disks can be moved at a '
+          'time. Refine your query and try again.' % MAX_DISKS_TO_MOVE)
+
+    res = self._CheckForDisksInUseByOtherInstances(
+        instances_to_ignore, disk_names)
+    if res:
+      offending_instances = ['%s: %s' % (instance, ', '.join(disks))
+                             for instance, disks in res]
+      raise command_base.CommandError(
+          'Some of the instances you\'d like to move have disks that are in '
+          'use by other instances: (Offending instance: disks attached)\n%s' %
+          (utils.ListStrings(offending_instances)))
+
+  def _CheckForDisksInUseByOtherInstances(self, instances, disk_names):
+    """Returns a list containing a mapping of instance to persistent disks.
+
+    Args:
+      instances: The set of instances to inspect.
+      disk_names: The disks to look for.
+
+    Returns:
+      A list of tuples where the first element of each tuple is an instance
+      name and the second element is a list of disks attached to that
+      instance.
+    """
+    res = {}
+    disk_names = set(disk_names)
+    for instance in instances:
+      instance_name = instance['name']
+      for disk in instance.get('disks', []):
+        if disk['type'] != 'PERSISTENT':
+          continue
+        disk_name = disk['source'].split('/')[-1]
+        if disk_name in disk_names:
+          if instance_name not in res:
+            res[instance_name] = []
+          res[instance_name].append(disk_name)
+    return sorted(res.iteritems())
+
+  def _GetPersistentDiskNames(self, instances):
+    res = []
+    for instance in instances:
+      for disk in instance.get('disks', []):
+        if disk['type'] == 'PERSISTENT':
+          res.append(disk['source'].split('/')[-1])
+    return res
+
+  def _CheckDestinationZone(self):
+    """Raises an exception if the destination zone is not valid."""
+    print 'Checking destination zone...'
+    self._zones_api.get(project=self._project,
+                        zone=self._flags.destination_zone).execute()
+
+  def _WriteLog(self, log_path, instances_to_mv, snapshot_mappings):
+    """Logs the instances that will be moved and the destination zone."""
+    print 'If the move fails, you can re-attempt it using:'
+    print '  gcutil resumemove %s' % log_path
+    with open(log_path, 'w') as f:
+      contents = {'version': version.__version__,
+                  'dest_zone': self._flags.destination_zone,
+                  'src_zone': self._flags.source_zone,
+                  'instances': instances_to_mv,
+                  'snapshot_mappings': snapshot_mappings}
+      json.dump(contents, f)
+
+  def _GenerateLogPath(self):
+    """Generates a file path in the form ~/.gcutil.move.YYmmddHHMMSS."""
+    timestamp = datetime.datetime.utcnow().strftime('%Y%m%d%H%M%S')
+    return os.path.join(os.path.expanduser('~'), '.gcutil.move.' + timestamp)
+
+
+class ResumeMove(MoveInstancesBase):
+  """Resume a previously-failed move.
+
+  The moveinstances subcommand produces a log file that can be used to
+  re-attempt a move that fails. This is intended to help complete
+  moves that are interrupted by the user or by transient network
+  failures.
+
+  WARNING: Instances that are moved will lose ALL of their ephemeral
+  state (i.e., ephemeral disks, ephemeral IP addresses, and memory).
+  """
+
+  positional_args = '<log-path>'
+
+  def __init__(self, name, flag_values):
+    super(ResumeMove, self).__init__(name, flag_values)
+
+    flags.DEFINE_boolean(
+        'keep_log_file',
+        False,
+        'If true, the log file is not deleted at the end of the resume.',
+        flag_values=flag_values)
+
+  def _Intersect(self, resources1, resources2):
+    """set(resources1) & set(resources2) based on the name field."""
+    names1 = set(r['name'] for r in resources1)
+    return [r for r in resources2 if r['name'] in names1]
+
+  def _Subtract(self, resources1, resources2):
+    """set(resources1) - set(resources2) based on the name field."""
+    names2 = set(r['name'] for r in resources2)
+    return [r for r in resources1 if r['name'] not in names2]
+
+  def _GetKey(self, log, key):
+    """Returns log[key] or raises a CommandError if key does not exist."""
+    value = log.get(key)
+    if value is None:
+      raise command_base.CommandError(
+          'The log file did not contain a %s key.' % repr(key))
+    return value
+
+  def _ParseLog(self, log_path):
+    """Loads the JSON contents of the file pointed to by log_path."""
+    print 'Parsing log file...'
+    with open(log_path) as f:
+      result = json.load(f)
+    return result
+
+  def HandleMove(self, log_path):
+    """Attempts the move dictated in the given log file.
+
+    This method first checks the current state of the project to see
+    which instances have already been moved before moving the
+    instances that were left behind in a previous failed move.
+
+    The user is prompted to continue before any changes are made.
+
+    Args:
+      log_path: The path to the replay log.
+    """
+    if not os.path.exists(log_path):
+      raise command_base.CommandError('File not found: %s' % log_path)
+
+    log = self._ParseLog(log_path)
+
+    src_zone = self._GetKey(log, 'src_zone')
+    print 'Source zone is %s.' % src_zone
+
+    dest_zone = self._GetKey(log, 'dest_zone')
+    print 'Destination zone is %s.' % dest_zone
+
+    snapshot_mappings = self._GetKey(log, 'snapshot_mappings')
+    instances_to_mv = self._GetKey(log, 'instances')
+
+    instances_in_dest = utils.All(
+        self._instances_api.list, self._project, zone=dest_zone)['items']
+    instances_in_source = utils.All(
+        self._instances_api.list, self._project, zone=src_zone)['items']
+
+    # Note that we cannot use normal set intersection and subtraction
+    # because two different instance resources could be referring to
+    # the same instance (e.g., the instance was restarted by the
+    # system).
+    instances_to_ignore = self._Intersect(instances_to_mv, instances_in_dest)
+    instances_to_mv = self._Subtract(instances_to_mv, instances_in_dest)
+
+    if not instances_to_mv:
+      raise command_base.CommandError(
+          'All instances are already in %s.' % dest_zone)
+
+    # Figures out which disks have not been moved.
+    disks_in_dest = set(utils.AllNames(
+        self._disks_api.list, self._project, zone=dest_zone))
+    disks_in_src = set(utils.AllNames(
+        self._disks_api.list, self._project, zone=src_zone))
+
+    disks_to_mv = set(snapshot_mappings.keys()) & disks_in_src
+
+    instances_to_delete = self._Intersect(instances_to_mv, instances_in_source)
+
+    # For the disks that are still in the source zone, figures out
+    # which ones still need to be snapshotted before being deleted.
+    snapshot_mappings_for_unmoved_disks = {}
+    if disks_to_mv:
+      current_snapshots = utils.AllNames(
+          self._snapshots_api.list, self._project)
+
+      for disk, snapshot in snapshot_mappings.iteritems():
+        if disk in disks_to_mv and snapshot not in current_snapshots:
+          snapshot_mappings_for_unmoved_disks[disk] = snapshot
+
+    # Ensures that the current quotas can support the move and prompts
+    # the user for confirmation.
+    self._CheckQuotas(instances_to_mv, disks_to_mv, src_zone, dest_zone,
+                      snapshots_to_create=snapshot_mappings_for_unmoved_disks)
+    self._Confirm(instances_to_mv, instances_to_ignore,
+                  disks_to_mv, dest_zone)
+
+    self._DeleteInstances(instances_to_delete, src_zone)
+    self._CreateSnapshots(snapshot_mappings_for_unmoved_disks,
+                          src_zone, dest_zone)
+    self._DeleteDisks(disks_to_mv, src_zone)
+
+    # Create disks in destination zone from snapshots.
+    all_snapshots = set(utils.AllNames(
+        self._snapshots_api.list, self._project))
+    disks_to_create = {}
+    for disk, snapshot in snapshot_mappings.iteritems():
+      if snapshot in all_snapshots and disk not in disks_in_dest:
+        disks_to_create[disk] = snapshot
+    self._CreateDisksFromSnapshots(disks_to_create, dest_zone)
+
+    self._CreateInstances(instances_to_mv, src_zone, dest_zone)
+    self._DeleteSnapshots(disks_to_create.values(), dest_zone)
+
+    if not self._flags.keep_log_file:
+      # We have succeeded, so it's safe to delete the log file.
+      os.remove(log_path)
+
+
+def AddCommands():
+  appcommands.AddCmd('moveinstances', MoveInstances)
+  appcommands.AddCmd('resumemove', ResumeMove)
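For reference, _WriteLog above serializes a small JSON replay log that resumemove later loads with _ParseLog and reads back via _GetKey. A sketch of such a log, written here as the Python dict that would be passed to json.dump (the field values are hypothetical; only the key names come from the code above):

# Hypothetical contents of a ~/.gcutil.move.<timestamp> replay log.
# resumemove uses these keys to work out which instances and disks still
# need to be moved, snapshotted, or recreated in the destination zone.
log_contents = {
    'version': '1.7.1',                 # gcutil version that wrote the log (assumed value)
    'src_zone': 'zone-a',
    'dest_zone': 'zone-b',
    'instances': [                      # full instance resources captured before deletion
        {'name': 'i-1', 'zone': 'zone-a', 'disks': []}],
    'snapshot_mappings': {              # disk name -> generated snapshot name
        'disk-1': 'snapshot-0f8b5a2e-1111-2222-3333-444455556666'},
}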