anyscale 0.26.40__py3-none-any.whl → 0.26.42__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,122 @@
1
+ # coding: utf-8
2
+
3
+ """
4
+ Managed Ray API
5
+
6
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
7
+
8
+ The version of the OpenAPI document: 0.1.0
9
+ Generated by: https://openapi-generator.tech
10
+ """
11
+
12
+
13
+ import pprint
14
+ import re # noqa: F401
15
+
16
+ import six
17
+
18
+ from openapi_client.configuration import Configuration
19
+
20
+
21
class MachinePoolSearchQuery(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'machine_pool_name': 'TextQuery'
    }

    attribute_map = {
        'machine_pool_name': 'machine_pool_name'
    }

    def __init__(self, machine_pool_name=None, local_vars_configuration=None):  # noqa: E501
        """MachinePoolSearchQuery - a model defined in OpenAPI

        :param machine_pool_name: optional TextQuery used to filter machine
            pools by name; when absent, no filtering is applied.
        :param local_vars_configuration: client Configuration controlling
            client-side validation; a default Configuration is created when
            None is passed.
        """  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        # Backing attribute for the `machine_pool_name` property.
        self._machine_pool_name = None
        # No polymorphic discriminator for this model.
        self.discriminator = None

        # Only route through the setter when a value was actually supplied,
        # so an omitted field stays None.
        if machine_pool_name is not None:
            self.machine_pool_name = machine_pool_name

    @property
    def machine_pool_name(self):
        """Gets the machine_pool_name of this MachinePoolSearchQuery.  # noqa: E501

        Filters machine pools by name. If this field is absent, no filtering is done.  # noqa: E501

        :return: The machine_pool_name of this MachinePoolSearchQuery.  # noqa: E501
        :rtype: TextQuery
        """
        return self._machine_pool_name

    @machine_pool_name.setter
    def machine_pool_name(self, machine_pool_name):
        """Sets the machine_pool_name of this MachinePoolSearchQuery.

        Filters machine pools by name. If this field is absent, no filtering is done.  # noqa: E501

        :param machine_pool_name: The machine_pool_name of this MachinePoolSearchQuery.  # noqa: E501
        :type: TextQuery
        """

        self._machine_pool_name = machine_pool_name

    def to_dict(self):
        """Returns the model properties as a dict.

        Nested models (anything exposing ``to_dict``) are serialized
        recursively, including models held inside lists and dict values.
        """
        result = {}

        # Note: `six.iteritems` was dropped in favor of the equivalent
        # built-in `dict.items()`; iteration behavior is identical.
        for attr, _ in self.openapi_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, MachinePoolSearchQuery):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, MachinePoolSearchQuery):
            return True

        return self.to_dict() != other.to_dict()
@@ -0,0 +1,123 @@
1
+ # coding: utf-8
2
+
3
+ """
4
+ Managed Ray API
5
+
6
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
7
+
8
+ The version of the OpenAPI document: 0.1.0
9
+ Generated by: https://openapi-generator.tech
10
+ """
11
+
12
+
13
+ import pprint
14
+ import re # noqa: F401
15
+
16
+ import six
17
+
18
+ from openapi_client.configuration import Configuration
19
+
20
+
21
class UpdatePaymentInfo(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'redirect_url': 'str'
    }

    attribute_map = {
        'redirect_url': 'redirect_url'
    }

    def __init__(self, redirect_url=None, local_vars_configuration=None):  # noqa: E501
        """UpdatePaymentInfo - a model defined in OpenAPI

        :param redirect_url: URL to redirect to for adding/updating payment
            info. Required: the setter raises ValueError for None when
            client-side validation is enabled.
        :param local_vars_configuration: client Configuration controlling
            client-side validation; a default Configuration is created when
            None is passed.
        """  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        # Backing attribute for the `redirect_url` property.
        self._redirect_url = None
        # No polymorphic discriminator for this model.
        self.discriminator = None

        # Always assign (even None) so the setter's required-field
        # validation runs on construction.
        self.redirect_url = redirect_url

    @property
    def redirect_url(self):
        """Gets the redirect_url of this UpdatePaymentInfo.  # noqa: E501

        URL to redirect to for adding/updating payment info  # noqa: E501

        :return: The redirect_url of this UpdatePaymentInfo.  # noqa: E501
        :rtype: str
        """
        return self._redirect_url

    @redirect_url.setter
    def redirect_url(self, redirect_url):
        """Sets the redirect_url of this UpdatePaymentInfo.

        URL to redirect to for adding/updating payment info  # noqa: E501

        :param redirect_url: The redirect_url of this UpdatePaymentInfo.  # noqa: E501
        :type: str
        :raises ValueError: if client-side validation is on and the value is None.
        """
        if self.local_vars_configuration.client_side_validation and redirect_url is None:  # noqa: E501
            raise ValueError("Invalid value for `redirect_url`, must not be `None`")  # noqa: E501

        self._redirect_url = redirect_url

    def to_dict(self):
        """Returns the model properties as a dict.

        Nested models (anything exposing ``to_dict``) are serialized
        recursively, including models held inside lists and dict values.
        """
        result = {}

        # Note: `six.iteritems` was dropped in favor of the equivalent
        # built-in `dict.items()`; iteration behavior is identical.
        for attr, _ in self.openapi_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, UpdatePaymentInfo):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, UpdatePaymentInfo):
            return True

        return self.to_dict() != other.to_dict()
@@ -15,6 +15,7 @@ from anyscale.commands.util import AnyscaleCommand, OptionPromptNull
15
15
  from anyscale.controllers.cloud_controller import CloudController
16
16
  from anyscale.util import (
17
17
  allow_optional_file_storage,
18
+ SharedStorageType,
18
19
  validate_non_negative_arg,
19
20
  )
20
21
 
@@ -152,6 +153,14 @@ def default_region(provider: str) -> str:
152
153
  "disable this and instead manually grant users permissions to the cloud."
153
154
  ),
154
155
  )
156
+ @click.option(
157
+ "--shared-storage",
158
+ required=False,
159
+ type=click.Choice([e.value for e in SharedStorageType], case_sensitive=False),
160
+ default=SharedStorageType.OBJECT_STORAGE.value,
161
+ show_default=True,
162
+ help="The type of shared storage to use for the cloud. Use 'object-storage' for cloud bucket-based storage (e.g., S3, GCS), or 'nfs' for network file systems.",
163
+ )
155
164
  def setup_cloud( # noqa: PLR0913
156
165
  provider: str,
157
166
  region: str,
@@ -162,8 +171,12 @@ def setup_cloud( # noqa: PLR0913
162
171
  enable_head_node_fault_tolerance: bool,
163
172
  yes: bool,
164
173
  disable_auto_add_user: bool,
174
+ shared_storage: str,
165
175
  ) -> None:
166
176
  # TODO (congding): remove `anyscale_managed` in the future, now keeping it for compatibility
177
+
178
+ # Convert string to enum for type safety
179
+ shared_storage_type = SharedStorageType(shared_storage)
167
180
  if provider == "aws":
168
181
  CloudController().setup_managed_cloud(
169
182
  provider=provider,
@@ -174,6 +187,7 @@ def setup_cloud( # noqa: PLR0913
174
187
  enable_head_node_fault_tolerance=enable_head_node_fault_tolerance,
175
188
  yes=yes,
176
189
  auto_add_user=(not disable_auto_add_user),
190
+ shared_storage=shared_storage_type,
177
191
  )
178
192
  elif provider == "gcp":
179
193
  if not project_id:
@@ -193,6 +207,7 @@ def setup_cloud( # noqa: PLR0913
193
207
  enable_head_node_fault_tolerance=enable_head_node_fault_tolerance,
194
208
  yes=yes,
195
209
  auto_add_user=(not disable_auto_add_user),
210
+ shared_storage=shared_storage_type,
196
211
  )
197
212
 
198
213
 
@@ -234,6 +249,32 @@ def cloud_config_group() -> None:
234
249
  pass
235
250
 
236
251
 
252
+ @cloud_cli.command(
253
+ name="add-deployment",
254
+ help="Add a new cloud deployment to an existing cloud.",
255
+ cls=AnyscaleCommand,
256
+ example=command_examples.CLOUD_ADD_DEPLOYMENT_EXAMPLE,
257
+ )
258
+ @click.option(
259
+ "--cloud-name",
260
+ "-n",
261
+ help="The name of the cloud to add the new deployment to.",
262
+ type=str,
263
+ required=True,
264
+ )
265
+ @click.option(
266
+ "--file", "-f", help="YAML file containing the deployment spec.", required=True,
267
+ )
268
+ @click.option(
269
+ "--yes", "-y", is_flag=True, default=False, help="Skip asking for confirmation."
270
+ )
271
+ def cloud_add_deployment(cloud_name: str, file: str, yes: bool,) -> None:
272
+ try:
273
+ CloudController().add_cloud_deployment(cloud_name, file, yes)
274
+ except click.ClickException as e:
275
+ print(e)
276
+
277
+
237
278
  @cloud_cli.command(
238
279
  name="update",
239
280
  help=(
@@ -294,7 +335,10 @@ def cloud_update( # noqa: PLR0913
294
335
  file: Optional[str],
295
336
  ) -> None:
296
337
  if file:
297
- CloudController().update_cloud_deployments(file)
338
+ try:
339
+ CloudController().update_cloud_deployments(file)
340
+ except click.ClickException as e:
341
+ print(e)
298
342
  return
299
343
 
300
344
  if cloud_name and name and cloud_name != name:
@@ -651,6 +651,31 @@ collaborators:
651
651
  permission_level: "readonly"
652
652
  """
653
653
 
654
+ CLOUD_ADD_DEPLOYMENT_EXAMPLE = """\
655
+ $ anyscale cloud add-deployment --cloud-name my-cloud --file new-cloud-deployment.yaml
656
+ Successfully added deployment my-new-deployment to cloud my-cloud.
657
+
658
+ $ cat new-cloud-deployment.yaml
659
+ name: my-new-deployment
660
+ provider: AWS
661
+ compute_stack: VM
662
+ region: us-west-2
663
+ networking_mode: PUBLIC
664
+ object_storage:
665
+ bucket_name: s3://my-bucket
666
+ file_storage:
667
+ file_storage_id: fs-123
668
+ aws_config:
669
+ vpc_id: vpc-123
670
+ subnet_ids:
671
+ - subnet-123
672
+ security_group_ids:
673
+ - sg-123
674
+ anyscale_iam_role_id: arn:aws:iam::123456789012:role/anyscale-role-123
675
+ cluster_iam_role_id: arn:aws:iam::123456789012:role/cluster-role-123
676
+ memorydb_cluster_name: my-memorydb-cluster
677
+ """
678
+
654
679
  CLOUD_GET_CLOUD_EXAMPLE = """\
655
680
  $ anyscale cloud get --name my-cloud
656
681
  id: cld_123
@@ -100,6 +100,7 @@ from anyscale.util import ( # pylint:disable=private-import
100
100
  get_user_env_aws_account,
101
101
  prepare_cloudformation_template,
102
102
  REDIS_TLS_ADDRESS_PREFIX,
103
+ SharedStorageType,
103
104
  )
104
105
  from anyscale.utils.cloud_update_utils import (
105
106
  CLOUDFORMATION_TIMEOUT_SECONDS_LONG,
@@ -234,6 +235,7 @@ class CloudController(BaseController):
234
235
  boto3_session: Optional[boto3.Session] = None,
235
236
  is_anyscale_hosted: bool = False,
236
237
  anyscale_hosted_network_info: Optional[Dict[str, Any]] = None,
238
+ shared_storage: SharedStorageType = SharedStorageType.OBJECT_STORAGE,
237
239
  ):
238
240
  if boto3_session is None:
239
241
  boto3_session = boto3.Session(region_name=region)
@@ -248,6 +250,7 @@ class CloudController(BaseController):
248
250
  enable_head_node_fault_tolerance,
249
251
  boto3_session,
250
252
  is_anyscale_hosted=is_anyscale_hosted,
253
+ shared_storage=shared_storage,
251
254
  )
252
255
 
253
256
  parameters = [
@@ -266,6 +269,12 @@ class CloudController(BaseController):
266
269
  "ParameterKey": "MemoryDBRedisPort",
267
270
  "ParameterValue": MEMORYDB_REDIS_PORT,
268
271
  },
272
+ {
273
+ "ParameterKey": "EnableEFS",
274
+ "ParameterValue": "true"
275
+ if shared_storage == SharedStorageType.NFS
276
+ else "false",
277
+ },
269
278
  ]
270
279
  if not is_anyscale_hosted:
271
280
  parameters.append(
@@ -316,6 +325,7 @@ class CloudController(BaseController):
316
325
  anyscale_aws_account: str,
317
326
  _use_strict_iam_permissions: bool = False, # This should only be used in testing.
318
327
  boto3_session: Optional[boto3.Session] = None,
328
+ shared_storage: SharedStorageType = SharedStorageType.OBJECT_STORAGE,
319
329
  ) -> Dict[str, Any]:
320
330
  """
321
331
  Run cloudformation to create the AWS resources for a cloud.
@@ -334,6 +344,7 @@ class CloudController(BaseController):
334
344
  cloud_id,
335
345
  enable_head_node_fault_tolerance,
336
346
  boto3_session,
347
+ shared_storage=shared_storage,
337
348
  )
338
349
 
339
350
  cross_account_iam_policies = self._get_anyscale_cross_account_iam_policies(
@@ -360,6 +371,12 @@ class CloudController(BaseController):
360
371
  "ParameterKey": "MemoryDBRedisPort",
361
372
  "ParameterValue": MEMORYDB_REDIS_PORT,
362
373
  },
374
+ {
375
+ "ParameterKey": "EnableEFS",
376
+ "ParameterValue": "true"
377
+ if shared_storage == SharedStorageType.NFS
378
+ else "false",
379
+ },
363
380
  ]
364
381
  for parameter in cross_account_iam_policies:
365
382
  parameters.append(parameter)
@@ -454,6 +471,7 @@ class CloudController(BaseController):
454
471
  anyscale_aws_account: str,
455
472
  organization_id: str,
456
473
  enable_head_node_fault_tolerance: bool,
474
+ shared_storage: SharedStorageType = SharedStorageType.OBJECT_STORAGE,
457
475
  ):
458
476
  setup_utils = try_import_gcp_managed_setup_utils()
459
477
 
@@ -469,6 +487,7 @@ class CloudController(BaseController):
469
487
  anyscale_aws_account,
470
488
  organization_id,
471
489
  enable_head_node_fault_tolerance,
490
+ shared_storage=shared_storage,
472
491
  )
473
492
 
474
493
  self.log.debug("GCP Deployment Manager resource config:")
@@ -943,6 +962,7 @@ class CloudController(BaseController):
943
962
  ] = None, # This is used by AIOA cloud setup
944
963
  _use_strict_iam_permissions: bool = False, # This should only be used in testing.
945
964
  auto_add_user: bool = True,
965
+ shared_storage: SharedStorageType = SharedStorageType.OBJECT_STORAGE,
946
966
  ) -> None:
947
967
  """
948
968
  Sets up a cloud provider
@@ -991,6 +1011,7 @@ class CloudController(BaseController):
991
1011
  anyscale_aws_account,
992
1012
  _use_strict_iam_permissions=_use_strict_iam_permissions,
993
1013
  boto3_session=boto3_session,
1014
+ shared_storage=shared_storage,
994
1015
  )
995
1016
  self.cloud_event_producer.produce(
996
1017
  CloudAnalyticsEventName.RESOURCES_CREATED, succeeded=True,
@@ -1111,6 +1132,7 @@ class CloudController(BaseController):
1111
1132
  anyscale_aws_account,
1112
1133
  organization_id,
1113
1134
  enable_head_node_fault_tolerance,
1135
+ shared_storage=shared_storage,
1114
1136
  )
1115
1137
  self.cloud_event_producer.produce(
1116
1138
  CloudAnalyticsEventName.RESOURCES_CREATED, succeeded=True,
@@ -1525,48 +1547,6 @@ class CloudController(BaseController):
1525
1547
 
1526
1548
  return formatted_diff.strip()
1527
1549
 
1528
- def _compare_cloud_deployments(
1529
- self,
1530
- deployments: List[CloudDeployment],
1531
- existing_deployments: Dict[str, CloudDeployment],
1532
- ) -> List[CloudDeployment]:
1533
- """
1534
- Compares the new deployments with the existing deployments and returns a list of updated/added deployments.
1535
- """
1536
-
1537
- deployment_ids = {
1538
- deployment.cloud_deployment_id
1539
- for deployment in deployments
1540
- if deployment.cloud_deployment_id
1541
- }
1542
-
1543
- if existing_deployments.keys() - deployment_ids:
1544
- raise ClickException("Deleting cloud deployments is not supported.")
1545
-
1546
- unknown_deployments = deployment_ids - existing_deployments.keys()
1547
- if unknown_deployments:
1548
- raise ClickException(
1549
- f"Cloud deployment(s) {unknown_deployments} do not exist. Do not include a deployment ID when adding a new deployment."
1550
- )
1551
-
1552
- updated_deployments: List[CloudDeployment] = []
1553
- for d in deployments:
1554
- if d.cloud_deployment_id:
1555
- if d == existing_deployments[d.cloud_deployment_id]:
1556
- continue
1557
- if d.provider == CloudProviders.PCP:
1558
- raise ClickException(
1559
- "Updating machine pool deployments is not supported."
1560
- )
1561
- else:
1562
- if d.provider == CloudProviders.PCP:
1563
- raise ClickException(
1564
- "Please use `anyscale machine-pool attach` to attach a machine pool to a cloud."
1565
- )
1566
- updated_deployments.append(d)
1567
-
1568
- return updated_deployments
1569
-
1570
1550
  def _preprocess_aws(self, cloud_id: str, deployment: CloudDeployment,) -> None:
1571
1551
  if not deployment.aws_config and not deployment.file_storage:
1572
1552
  return
@@ -1685,6 +1665,62 @@ class CloudController(BaseController):
1685
1665
 
1686
1666
  deployment.gcp_config = gcp_config
1687
1667
 
1668
+ def add_cloud_deployment(
1669
+ self, cloud_name: str, spec_file: str, yes: bool = False,
1670
+ ):
1671
+ cloud_id, _ = get_cloud_id_and_name(self.api_client, cloud_name=cloud_name)
1672
+
1673
+ # Read the spec file.
1674
+ path = pathlib.Path(spec_file)
1675
+ if not path.exists():
1676
+ raise ClickException(f"{spec_file} does not exist.")
1677
+ if not path.is_file():
1678
+ raise ClickException(f"{spec_file} is not a file.")
1679
+
1680
+ spec = yaml.safe_load(path.read_text())
1681
+ try:
1682
+ new_deployment = CloudDeployment(**spec)
1683
+ except Exception as e: # noqa: BLE001
1684
+ raise ClickException(f"Failed to parse deployment: {e}")
1685
+
1686
+ if new_deployment.provider == CloudProviders.AWS:
1687
+ self._preprocess_aws(cloud_id=cloud_id, deployment=new_deployment)
1688
+ elif new_deployment.provider == CloudProviders.GCP:
1689
+ self._preprocess_gcp(deployment=new_deployment)
1690
+
1691
+ # Log an additional warning if a new deployment is being added but a deployment with the same AWS/GCP region already exists.
1692
+ existing_spec = self.get_cloud_deployments(cloud_id)
1693
+ existing_deployments = {
1694
+ deployment["cloud_deployment_id"]: CloudDeployment(**deployment)
1695
+ for deployment in existing_spec["deployments"]
1696
+ }
1697
+ existing_stack_provider_regions = {
1698
+ (d.compute_stack, d.provider, d.region)
1699
+ for d in existing_deployments.values()
1700
+ if d.provider in (CloudProviders.AWS, CloudProviders.GCP)
1701
+ }
1702
+ if (
1703
+ new_deployment.compute_stack,
1704
+ new_deployment.provider,
1705
+ new_deployment.region,
1706
+ ) in existing_stack_provider_regions:
1707
+ self.log.warning(
1708
+ f"A {new_deployment.provider} {new_deployment.compute_stack} deployment in region {new_deployment.region} already exists."
1709
+ )
1710
+ confirm("Would you like to proceed with adding this deployment?", yes)
1711
+
1712
+ # Add the deployment.
1713
+ try:
1714
+ self.api_client.add_cloud_deployment_api_v2_clouds_cloud_id_add_deployment_put(
1715
+ cloud_id=cloud_id, cloud_deployment=new_deployment,
1716
+ )
1717
+ except Exception as e: # noqa: BLE001
1718
+ raise ClickException(f"Failed to add cloud deployment: {e}")
1719
+
1720
+ self.log.info(
1721
+ f"Successfully added deployment {new_deployment.name} to cloud {existing_spec['name']}!"
1722
+ )
1723
+
1688
1724
  def update_cloud_deployments( # noqa: PLR0912
1689
1725
  self, spec_file: str, yes: bool = False,
1690
1726
  ):
@@ -1707,58 +1743,41 @@ class CloudController(BaseController):
1707
1743
  raise ClickException("Changing the name of a cloud is not supported.")
1708
1744
 
1709
1745
  # Diff the existing and new specs
1710
- diff = self._generate_diff(existing_spec, spec)
1746
+ diff = self._generate_diff(existing_spec["deployments"], spec["deployments"])
1711
1747
  if not diff:
1712
1748
  self.log.info("No changes detected.")
1713
1749
  return
1714
1750
 
1715
- # Get updated/new deployments.
1716
- try:
1717
- deployments = [CloudDeployment(**d) for d in spec["deployments"]]
1718
- except Exception as e: # noqa: BLE001
1719
- raise ClickException(f"Failed to parse deployments: {e}")
1720
-
1721
1751
  existing_deployments = {
1722
1752
  deployment["cloud_deployment_id"]: CloudDeployment(**deployment)
1723
1753
  for deployment in existing_spec["deployments"]
1724
1754
  }
1725
1755
 
1726
- # Figure out which deployments have been updated/added.
1727
- updated_deployments = self._compare_cloud_deployments(
1728
- deployments, existing_deployments,
1729
- )
1756
+ updated_deployments: List[CloudDeployment] = []
1757
+ for d in spec["deployments"]:
1758
+ try:
1759
+ deployment = CloudDeployment(**d)
1760
+ except Exception as e: # noqa: BLE001
1761
+ raise ClickException(f"Failed to parse deployment: {e}")
1762
+
1763
+ if not deployment.cloud_deployment_id:
1764
+ raise ClickException(
1765
+ "All cloud deployments must include a cloud_deployment_id."
1766
+ )
1767
+ if deployment.cloud_deployment_id not in existing_deployments:
1768
+ raise ClickException(
1769
+ f"Cloud deployment {deployment.cloud_deployment_id} not found."
1770
+ )
1771
+ if deployment.provider == CloudProviders.PCP:
1772
+ raise ClickException(
1773
+ "Please use the `anyscale machine-pool` CLI to update machine pools."
1774
+ )
1775
+ if deployment != existing_deployments[deployment.cloud_deployment_id]:
1776
+ updated_deployments.append(deployment)
1730
1777
 
1731
1778
  # Log the diff and confirm.
1732
1779
  self.log.info(f"Detected the following changes:\n{diff}")
1733
1780
 
1734
- existing_deployment_ids = {
1735
- d.cloud_deployment_id for d in updated_deployments if d.cloud_deployment_id
1736
- }
1737
- if len(updated_deployments) - len(existing_deployment_ids):
1738
- self.log.info(
1739
- f"{len(updated_deployments) - len(existing_deployment_ids)} new deployment(s) will be added."
1740
- )
1741
- if existing_deployment_ids:
1742
- self.log.info(
1743
- f"{len(existing_deployment_ids)} existing deployment(s) will be updated ({', '.join(existing_deployment_ids)})"
1744
- )
1745
-
1746
- # Log an additional warning if a new deployment is being added but a deployment with the same AWS/GCP region already exists.
1747
- existing_stack_provider_regions = {
1748
- (d.compute_stack, d.provider, d.region)
1749
- for d in existing_deployments.values()
1750
- if d.provider in (CloudProviders.AWS, CloudProviders.GCP)
1751
- }
1752
- for d in updated_deployments:
1753
- if (
1754
- not d.cloud_deployment_id
1755
- and (d.compute_stack, d.provider, d.region)
1756
- in existing_stack_provider_regions
1757
- ):
1758
- self.log.warning(
1759
- f"A {d.provider} {d.compute_stack} deployment in region {d.region} already exists."
1760
- )
1761
-
1762
1781
  confirm("Would you like to proceed with updating this cloud?", yes)
1763
1782
 
1764
1783
  # Preprocess the deployments if necessary.
@@ -2658,7 +2677,10 @@ class CloudController(BaseController):
2658
2677
  self.api_client, cloud_id, CloudProviders.GCP, self.log
2659
2678
  )
2660
2679
  verify_filestore_result = True
2661
- if cloud_resource.gcp_filestore_config.instance_name:
2680
+ if (
2681
+ cloud_resource.gcp_filestore_config
2682
+ and cloud_resource.gcp_filestore_config.instance_name
2683
+ ):
2662
2684
  verify_filestore_result = verify_lib.verify_filestore(
2663
2685
  factory, cloud_resource, region, gcp_logger, strict=strict
2664
2686
  )
@@ -2679,7 +2701,10 @@ class CloudController(BaseController):
2679
2701
  f"cloud storage: {self._passed_or_failed_str_from_bool(verify_cloud_storage_result)}",
2680
2702
  ]
2681
2703
 
2682
- if cloud_resource.gcp_filestore_config.instance_name:
2704
+ if (
2705
+ cloud_resource.gcp_filestore_config
2706
+ and cloud_resource.gcp_filestore_config.instance_name
2707
+ ):
2683
2708
  verification_results.append(
2684
2709
  f"filestore: {self._passed_or_failed_str_from_bool(verify_filestore_result)}"
2685
2710
  )