anyscale 0.26.64__py3-none-any.whl → 0.26.66__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59)
  1. anyscale/_private/anyscale_client/common.py +1 -1
  2. anyscale/_private/docgen/__main__.py +23 -4
  3. anyscale/_private/docgen/generator.py +127 -34
  4. anyscale/_private/docgen/generator_legacy.py +35 -12
  5. anyscale/client/README.md +37 -1
  6. anyscale/client/openapi_client/__init__.py +26 -0
  7. anyscale/client/openapi_client/api/default_api.py +1446 -245
  8. anyscale/client/openapi_client/models/__init__.py +26 -0
  9. anyscale/client/openapi_client/models/baseimagesenum.py +70 -1
  10. anyscale/client/openapi_client/models/cloud_deployment_compute_config.py +29 -1
  11. anyscale/client/openapi_client/models/connection_type.py +99 -0
  12. anyscale/client/openapi_client/models/create_experimental_workspace.py +29 -1
  13. anyscale/client/openapi_client/models/data_catalog.py +281 -0
  14. anyscale/client/openapi_client/models/data_catalog_connection.py +308 -0
  15. anyscale/client/openapi_client/models/data_catalog_connection_status.py +102 -0
  16. anyscale/client/openapi_client/models/data_catalog_provider.py +101 -0
  17. anyscale/client/openapi_client/models/databricks_connection_config.py +152 -0
  18. anyscale/client/openapi_client/models/databricks_connection_info.py +229 -0
  19. anyscale/client/openapi_client/models/databricks_connection_response.py +148 -0
  20. anyscale/client/openapi_client/models/databricks_register_request.py +187 -0
  21. anyscale/client/openapi_client/models/databricksconnectioninfo_response.py +121 -0
  22. anyscale/client/openapi_client/models/databricksconnectionresponse_response.py +121 -0
  23. anyscale/client/openapi_client/models/datacatalog_list_response.py +147 -0
  24. anyscale/client/openapi_client/models/datacatalogconnection_list_response.py +147 -0
  25. anyscale/client/openapi_client/models/decorated_session.py +29 -1
  26. anyscale/client/openapi_client/models/domain_verification.py +181 -0
  27. anyscale/client/openapi_client/models/list_databricks_connections.py +121 -0
  28. anyscale/client/openapi_client/models/o_auth_connection_response.py +229 -0
  29. anyscale/client/openapi_client/models/oauth_auth_url_response.py +121 -0
  30. anyscale/client/openapi_client/models/oauthconnectionresponse_response.py +121 -0
  31. anyscale/client/openapi_client/models/sso_config.py +148 -0
  32. anyscale/client/openapi_client/models/sso_connection.py +148 -0
  33. anyscale/client/openapi_client/models/sso_connection_state.py +100 -0
  34. anyscale/client/openapi_client/models/ssoconfig_response.py +121 -0
  35. anyscale/client/openapi_client/models/supportedbaseimagesenum.py +70 -1
  36. anyscale/client/openapi_client/models/task_summary_config.py +29 -3
  37. anyscale/client/openapi_client/models/task_table_config.py +29 -3
  38. anyscale/client/openapi_client/models/update_workspace_template.py +346 -0
  39. anyscale/client/openapi_client/models/usage_by_cluster_type.py +174 -0
  40. anyscale/client/openapi_client/models/usagebyclustertype_list_response.py +147 -0
  41. anyscale/client/openapi_client/models/validation_status.py +101 -0
  42. anyscale/commands/cloud_commands.py +310 -206
  43. anyscale/controllers/cloud_controller.py +175 -241
  44. anyscale/controllers/cloud_functional_verification_controller.py +6 -3
  45. anyscale/sdk/anyscale_client/models/baseimagesenum.py +70 -1
  46. anyscale/sdk/anyscale_client/models/cloud_deployment_compute_config.py +29 -1
  47. anyscale/sdk/anyscale_client/models/session.py +31 -3
  48. anyscale/sdk/anyscale_client/models/supportedbaseimagesenum.py +70 -1
  49. anyscale/shared_anyscale_utils/latest_ray_version.py +1 -1
  50. anyscale/util.py +1 -1
  51. anyscale/version.py +1 -1
  52. {anyscale-0.26.64.dist-info → anyscale-0.26.66.dist-info}/METADATA +1 -1
  53. {anyscale-0.26.64.dist-info → anyscale-0.26.66.dist-info}/RECORD +58 -33
  54. anyscale/commands/cloud_commands_util.py +0 -10
  55. {anyscale-0.26.64.dist-info → anyscale-0.26.66.dist-info}/WHEEL +0 -0
  56. {anyscale-0.26.64.dist-info → anyscale-0.26.66.dist-info}/entry_points.txt +0 -0
  57. {anyscale-0.26.64.dist-info → anyscale-0.26.66.dist-info}/licenses/LICENSE +0 -0
  58. {anyscale-0.26.64.dist-info → anyscale-0.26.66.dist-info}/licenses/NOTICE +0 -0
  59. {anyscale-0.26.64.dist-info → anyscale-0.26.66.dist-info}/top_level.txt +0 -0
anyscale/controllers/cloud_controller.py
@@ -48,7 +48,6 @@ from anyscale.client.openapi_client.models import (
     FileStorage,
     GCPConfig,
     GCPFileStoreConfig,
-    KubernetesConfig,
     NetworkingMode,
     NFSMountTarget,
     ObjectStorage,
@@ -188,7 +187,10 @@ class CloudController(BaseController):
         )
 
     def list_clouds(
-        self, cloud_name: Optional[str], cloud_id: Optional[str], max_items: int
+        self,
+        cloud_name: Optional[str],
+        cloud_id: Optional[str],
+        max_items: Optional[int] = None,
     ) -> str:
         if cloud_id is not None:
             clouds = [
@@ -201,12 +203,23 @@ class CloudController(BaseController):
                 ).result
             ]
         else:
-            clouds = self.api_client.list_clouds_api_v2_clouds_get(
-                count=max_items
-            ).results
-        output = clouds_formatter.format_clouds_output(
-            clouds=clouds[:max_items], json_format=False
-        )
+            clouds = []
+            next_token = None
+            while True:
+                response = self.api_client.list_clouds_api_v2_clouds_get(
+                    paging_token=next_token,
+                )
+                clouds.extend(response.results)
+                if not response.metadata.next_paging_token:
+                    break
+                if max_items is not None and len(clouds) >= max_items:
+                    break
+                next_token = response.metadata.next_paging_token
+
+        if max_items is not None:
+            clouds = clouds[:max_items]
+
+        output = clouds_formatter.format_clouds_output(clouds=clouds, json_format=False)
         return str(output)
 
     def _get_anyscale_cross_account_iam_policies(
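The new list_clouds implementation above drops the single count=max_items request in favor of a loop over next_paging_token, assembling the full list client-side and only trimming it when max_items is set. A minimal sketch of that pagination pattern, written against a hypothetical fetch_page callable rather than the real generated API client:

    from typing import Callable, List, Optional, Tuple

    # Hypothetical page fetcher: takes a paging token and returns (items, next_token).
    FetchPage = Callable[[Optional[str]], Tuple[List[dict], Optional[str]]]


    def collect_all(fetch_page: FetchPage, max_items: Optional[int] = None) -> List[dict]:
        """Accumulate results page by page, stopping early once max_items is reached."""
        items: List[dict] = []
        token: Optional[str] = None
        while True:
            page, token = fetch_page(token)
            items.extend(page)
            if token is None or (max_items is not None and len(items) >= max_items):
                break
        return items[:max_items] if max_items is not None else items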
@@ -2642,23 +2655,12 @@ class CloudController(BaseController):
             f"Overall Result: ALL {total_count} cloud resources verified successfully"
         )
 
-    def register_azure_or_generic_cloud(  # noqa: PLR0913
+    def register_azure_or_generic_cloud(
         self,
         name: str,
         provider: str,
+        cloud_resource: CloudDeployment,
         auto_add_user: bool = False,
-        # Optional cloud-resource-scoped parameters.
-        # Some of these are conditionally required.
-        region: Optional[str] = "default",
-        cloud_storage_bucket_name: Optional[str] = None,
-        cloud_storage_bucket_endpoint: Optional[str] = None,
-        cloud_storage_bucket_region: Optional[str] = None,
-        nfs_mount_targets: Optional[List[str]] = None,
-        nfs_mount_path: Optional[str] = None,
-        persistent_volume_claim: Optional[str] = None,
-        csi_ephemeral_volume_driver: Optional[str] = None,
-        kubernetes_zones: Optional[List[str]] = None,
-        anyscale_operator_iam_identity: Optional[str] = None,
     ) -> None:
         cloud_provider = (
             CloudProviders.AZURE if provider == "azure" else CloudProviders.GENERIC
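With the new signature above, register_azure_or_generic_cloud takes a single cloud_resource: CloudDeployment instead of the long list of resource-scoped flags, so callers assemble the deployment themselves. A hedged sketch of what that might look like for a generic Kubernetes cloud, reusing only model names that appear in this diff (imports assumed to come from the anyscale.client.openapi_client.models module shown in the import hunk; required fields and defaults are not verified here):

    from anyscale.client.openapi_client.models import (
        CloudDeployment,
        CloudProviders,
        ComputeStack,
        FileStorage,
        NFSMountTarget,
        ObjectStorage,
    )

    # Illustrative values only.
    cloud_resource = CloudDeployment(
        compute_stack=ComputeStack.K8S,
        provider=CloudProviders.GENERIC,
        region="default",
        object_storage=ObjectStorage(
            bucket_name="my-bucket",
            region="us-east-1",
            endpoint="https://storage.example.com",
        ),
        file_storage=FileStorage(
            mount_targets=[NFSMountTarget(address="10.0.0.5")],
            mount_path="/mnt/shared",
        ),
    )

    # `controller` stands in for a CloudController instance.
    controller.register_azure_or_generic_cloud(
        name="my-cloud",
        provider="generic",
        cloud_resource=cloud_resource,
    )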
@@ -2670,29 +2672,12 @@ class CloudController(BaseController):
             CloudAnalyticsEventName.COMMAND_START, succeeded=True
         )
 
-        # Handle parsing / conversion of nfs_mount_targets.
-        mount_targets: List[NFSMountTarget] = []
-        for target in nfs_mount_targets or []:
-            parts = [part.strip() for part in target.split(",")]
-            if len(parts) == 1:
-                mount_targets.append(NFSMountTarget(address=parts[0]))
-            elif len(parts) == 2:
-                mount_targets.append(NFSMountTarget(address=parts[1], zone=parts[0]))
-            else:
-                raise ClickException(
-                    f"Invalid mount target {target}; expected (zone,address) tuple or a singular address."
-                )
-
-        # Handle defaulting of region.
-        if not region:
-            region = "default"
-
         # Attempt to create the cloud.
         try:
             created_cloud = self.api_client.create_cloud_api_v2_clouds_post(
                 write_cloud=WriteCloud(
                     name=name,
-                    region=region,
+                    region=cloud_resource.region,
                     provider=cloud_provider,
                     is_bring_your_own_resource=True,
                     cluster_management_stack_version=ClusterManagementStackVersions.V2,
@@ -2717,30 +2702,7 @@ class CloudController(BaseController):
         try:
             with self.log.spinner("Registering Anyscale cloud resources..."):
                 self.api_client.add_cloud_resource_api_v2_clouds_cloud_id_add_resource_put(
-                    cloud_id=cloud_id,
-                    cloud_deployment=CloudDeployment(
-                        compute_stack=ComputeStack.K8S,
-                        provider=cloud_provider,
-                        region=region,
-                        object_storage=ObjectStorage(
-                            bucket_name=cloud_storage_bucket_name,
-                            region=cloud_storage_bucket_region or region,
-                            endpoint=cloud_storage_bucket_endpoint,
-                        )
-                        if cloud_storage_bucket_name
-                        else None,
-                        file_storage=FileStorage(
-                            mount_targets=mount_targets,
-                            mount_path=nfs_mount_path,
-                            persistent_volume_claim=persistent_volume_claim,
-                            csi_ephemeral_volume_driver=csi_ephemeral_volume_driver,
-                        )
-                        if mount_targets
-                        or persistent_volume_claim
-                        or csi_ephemeral_volume_driver
-                        else None,
-                        kubernetes_config=KubernetesConfig(zones=kubernetes_zones,),
-                    ),
+                    cloud_id=cloud_id, cloud_deployment=cloud_resource,
                 )
 
             self.cloud_event_producer.produce(
@@ -2776,10 +2738,13 @@ class CloudController(BaseController):
         helm_command = self._generate_helm_upgrade_command(
             provider=provider,
             cloud_deployment_id=cloud_resource_id,
-            region=region if cloud_provider == CloudProviders.AZURE else None,
-            operator_iam_identity=anyscale_operator_iam_identity
+            region=cloud_resource.region
             if cloud_provider == CloudProviders.AZURE
             else None,
+            operator_iam_identity=cloud_resource.kubernetes_config.anyscale_operator_iam_identity
+            if cloud_provider == CloudProviders.AZURE
+            and cloud_resource.kubernetes_config
+            else None,
             anyscale_cli_token=None,  # TODO: use $ANYSCALE_CLI_TOKEN placeholder
         )
 
@@ -2787,48 +2752,42 @@ class CloudController(BaseController):
             f"Cloud registration complete! To install the Anyscale operator, run:\n\n{helm_command}"
         )
 
-    def register_aws_cloud(  # noqa: PLR0913, PLR0912, C901
+    def register_aws_cloud(  # noqa: C901, PLR0912
         self,
         *,
-        region: str,
         name: str,
-        vpc_id: str,
-        subnet_ids: List[str],
-        efs_id: Optional[str],
-        anyscale_iam_role_id: str,
-        instance_iam_role_id: str,
-        security_group_ids: List[str],
-        cloud_storage_bucket_name: str,
-        memorydb_cluster_id: Optional[str],
-        functional_verify: Optional[str],
-        private_network: bool,
-        cluster_management_stack_version: ClusterManagementStackVersions,
+        cloud_resource: CloudDeployment,
+        functional_verify: Optional[str] = None,
+        cluster_management_stack_version: ClusterManagementStackVersions = ClusterManagementStackVersions.V2,
         yes: bool = False,
         skip_verifications: bool = False,
         auto_add_user: bool = False,
-        external_id: Optional[str] = None,
-        # Default to ComputeStack.VM for backwards compatibility
-        # for SDK users who do not specify a compute stack here.
-        compute_stack: ComputeStack = ComputeStack.VM,
-        kubernetes_zones: Optional[List[str]] = None,
-        anyscale_operator_iam_identity: Optional[str] = None,
-        persistent_volume_claim: Optional[str] = None,
-        csi_ephemeral_volume_driver: Optional[str] = None,
     ):
         functions_to_verify = self._validate_functional_verification_args(
             functional_verify
         )
+
         if not validate_aws_credentials(self.log):
             raise ClickException(
                 "Cloud registration requires valid AWS credentials to be set locally. Learn more: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html"
             )
 
-        # We accept both the full ARN and the bucket name as input
-        # but we unify it to the bucket name here
-        if cloud_storage_bucket_name.startswith(S3_ARN_PREFIX):
-            cloud_storage_bucket_name = cloud_storage_bucket_name[len(S3_ARN_PREFIX) :]
-        if not cloud_storage_bucket_name.startswith(S3_STORAGE_PREFIX):
-            cloud_storage_bucket_name = S3_STORAGE_PREFIX + cloud_storage_bucket_name
+        assert cloud_resource.aws_config
+
+        if not (
+            cloud_resource.object_storage and cloud_resource.object_storage.bucket_name
+        ):
+            raise click.ClickException(
+                "Cloud object storage is required for AWS cloud registration."
+            )
+        if cloud_resource.object_storage.bucket_name.startswith(S3_ARN_PREFIX):
+            cloud_resource.object_storage.bucket_name = cloud_resource.object_storage.bucket_name[
+                len(S3_ARN_PREFIX) :
+            ]
+        if not cloud_resource.object_storage.bucket_name.startswith(S3_STORAGE_PREFIX):
+            cloud_resource.object_storage.bucket_name = (
+                S3_STORAGE_PREFIX + cloud_resource.object_storage.bucket_name
+            )
 
         self.cloud_event_producer.init_trace_context(
             CloudAnalyticsEventCommandName.REGISTER, CloudProviders.AWS
@@ -2837,27 +2796,29 @@ class CloudController(BaseController):
             CloudAnalyticsEventName.COMMAND_START, succeeded=True
         )
 
-        credentials = anyscale_iam_role_id
-        if compute_stack == ComputeStack.K8S:
+        if cloud_resource.compute_stack == ComputeStack.K8S:
             # On K8S, we don't need to collect credentials;
             # instead, write a random value into this field
             # to maintain the property that each cloud's
             # credentials are unique.
             credentials = uuid.uuid4().hex
+        else:
+            credentials = cloud_resource.aws_config.anyscale_iam_role_id
 
         # Create a cloud without cloud resources first
         try:
             created_cloud = self.api_client.create_cloud_api_v2_clouds_post(
                 write_cloud=WriteCloud(
                     provider="AWS",
-                    region=region,
+                    region=cloud_resource.region,
                     credentials=credentials,
                     name=name,
                     is_bring_your_own_resource=True,
-                    is_private_cloud=private_network,
+                    is_private_cloud=cloud_resource.networking_mode
+                    == NetworkingMode.PRIVATE,
                     cluster_management_stack_version=cluster_management_stack_version,
                     auto_add_user=auto_add_user,
-                    external_id=external_id,
+                    external_id=cloud_resource.aws_config.external_id,
                 )
             )
             cloud_id = created_cloud.result.id
@@ -2874,37 +2835,6 @@ class CloudController(BaseController):
             raise
 
         try:
-            cloud_resource = CloudDeployment(
-                compute_stack=compute_stack,
-                provider=CloudProviders.AWS,
-                region=region,
-                networking_mode=NetworkingMode.PRIVATE
-                if private_network
-                else NetworkingMode.PUBLIC,
-                object_storage=ObjectStorage(bucket_name=cloud_storage_bucket_name),
-                file_storage=FileStorage(
-                    file_storage_id=efs_id,
-                    persistent_volume_claim=persistent_volume_claim,
-                    csi_ephemeral_volume_driver=csi_ephemeral_volume_driver,
-                )
-                if efs_id or persistent_volume_claim or csi_ephemeral_volume_driver
-                else None,
-                aws_config=AWSConfig(
-                    vpc_id=vpc_id,
-                    subnet_ids=subnet_ids,
-                    security_group_ids=security_group_ids,
-                    anyscale_iam_role_id=anyscale_iam_role_id,
-                    external_id=external_id,
-                    cluster_iam_role_id=instance_iam_role_id,
-                    memorydb_cluster_name=memorydb_cluster_id,
-                ),
-                kubernetes_config=KubernetesConfig(
-                    anyscale_operator_iam_identity=anyscale_operator_iam_identity,
-                    zones=kubernetes_zones,
-                )
-                if compute_stack == ComputeStack.K8S
-                else None,
-            )
             role, iam_role_original_policy = self._preprocess_aws(
                 cloud_id=cloud_id, deployment=cloud_resource
             )
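register_aws_cloud gets the same treatment: the CloudDeployment this controller used to assemble internally (removed above) is now supplied by the caller, while the controller still normalizes the S3 bucket name and checks that aws_config and object_storage are present. A rough sketch of a caller-built deployment for the VM compute stack, using only field names from the removed block (imports assumed from the models module shown earlier; values are illustrative):

    from anyscale.client.openapi_client.models import (
        AWSConfig,
        CloudDeployment,
        CloudProviders,
        ComputeStack,
        NetworkingMode,
        ObjectStorage,
    )

    # Illustrative values only.
    cloud_resource = CloudDeployment(
        compute_stack=ComputeStack.VM,
        provider=CloudProviders.AWS,
        region="us-west-2",
        networking_mode=NetworkingMode.PUBLIC,
        object_storage=ObjectStorage(bucket_name="my-anyscale-bucket"),
        aws_config=AWSConfig(
            vpc_id="vpc-0123456789abcdef0",
            subnet_ids=["subnet-aaa", "subnet-bbb"],
            security_group_ids=["sg-0123"],
            anyscale_iam_role_id="arn:aws:iam::123456789012:role/anyscale-iam-role",
            cluster_iam_role_id="arn:aws:iam::123456789012:role/cluster-node-role",
        ),
    )

    # `controller` stands in for a CloudController instance.
    controller.register_aws_cloud(name="my-aws-cloud", cloud_resource=cloud_resource)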
@@ -2930,7 +2860,10 @@ class CloudController(BaseController):
                 cloud_id=cloud_id
             )
             try:
-                if iam_role_original_policy is not None and external_id is None:
+                if (
+                    iam_role_original_policy is not None
+                    and cloud_resource.aws_config.external_id is None
+                ):
                     # Revert the assume policy back to the original policy
                     role.AssumeRolePolicy().update(  # type: ignore
                         PolicyDocument=json.dumps(iam_role_original_policy)
@@ -2945,7 +2878,7 @@ class CloudController(BaseController):
         # Verify cloud resources meet our requirement
         # Verification is only performed for VM compute stack.
         # TODO (shomilj): Add verification to the K8S compute stack as well.
-        if compute_stack == ComputeStack.VM:
+        if cloud_resource.compute_stack != ComputeStack.K8S:
             with self.log.spinner("Verifying cloud resources...") as spinner:
                 if (
                     not skip_verifications
@@ -2983,7 +2916,10 @@ class CloudController(BaseController):
                 cloud_id=cloud_id
             )
             try:
-                if iam_role_original_policy is not None and external_id is None:
+                if (
+                    iam_role_original_policy is not None
+                    and cloud_resource.aws_config.external_id is None
+                ):
                     # Revert the assume policy back to the original policy
                     role.AssumeRolePolicy().update(  # type: ignore
                         PolicyDocument=json.dumps(iam_role_original_policy)
@@ -3006,9 +2942,7 @@ class CloudController(BaseController):
             # For now, only wait for the cloud to be active if the compute stack is VM.
             # TODO (shomilj): support this fully for Kubernetes after provider metadata
             # checks are removed.
-            if compute_stack == ComputeStack.VM:
-                self.wait_for_cloud_to_be_active(cloud_id, CloudProviders.AWS)
-            if compute_stack == ComputeStack.K8S:
+            if cloud_resource.compute_stack == ComputeStack.K8S:
                 # Get the cloud resource ID to pass to the helm command.
                 cloud_resources = self.api_client.get_cloud_resources_api_v2_clouds_cloud_id_resources_get(
                     cloud_id=cloud_id,
@@ -3021,11 +2955,13 @@ class CloudController(BaseController):
                 helm_command = self._generate_helm_upgrade_command(
                     provider="aws",
                     cloud_deployment_id=cloud_resource_id,
-                    region=region,
+                    region=cloud_resource.region,
                 )
                 self.log.info(
                     f"Cloud registration complete! To install the Anyscale operator, run:\n\n{helm_command}"
                 )
+            else:
+                self.wait_for_cloud_to_be_active(cloud_id, CloudProviders.AWS)
             self.cloud_event_producer.produce(
                 CloudAnalyticsEventName.INFRA_SETUP_COMPLETE, succeeded=True
             )
@@ -3042,7 +2978,10 @@ class CloudController(BaseController):
                 cloud_id=cloud_id
             )
             try:
-                if iam_role_original_policy is not None and external_id is None:
+                if (
+                    iam_role_original_policy is not None
+                    and cloud_resource.aws_config.external_id is None
+                ):
                     # Revert the assume policy back to the original policy
                     role.AssumeRolePolicy().update(  # type: ignore
                         PolicyDocument=json.dumps(iam_role_original_policy)
@@ -3281,52 +3220,77 @@ class CloudController(BaseController):
             ]
         )
 
-    def register_gcp_cloud(  # noqa: PLR0913, PLR0912, C901
+    def _validate_gcp_config(self, compute_stack: ComputeStack, gcp_config: GCPConfig):
+        if gcp_config.project_id and gcp_config.project_id[0].isdigit():
+            # project ID should start with a letter
+            raise click.ClickException(
+                "Please provide a valid project ID. Note that project ID is not project number, see https://cloud.google.com/resource-manager/docs/creating-managing-projects#before_you_begin for details."
+            )
+
+        if (
+            compute_stack != ComputeStack.K8S
+            and re.search(
+                "projects\\/[0-9]*\\/locations\\/global\\/workloadIdentityPools\\/.+\\/providers\\/[a-z0-9-]*$",
+                gcp_config.provider_name,
+            )
+            is None
+        ):
+            raise click.ClickException(
+                "Please provide a valid, fully qualified provider name. Only lowercase letters, numbers, and dashes are allowed. Example: projects/<project number>/locations/global/workloadIdentityPools/<pool name>/providers/<provider id>"
+            )
+
+        if (
+            gcp_config.memorystore_instance_name is not None
+            and re.search(
+                "projects/.+/locations/.+/instances/.+",
+                gcp_config.memorystore_instance_name,
+            )
+            is None
+        ):
+            raise click.ClickException(
+                "Please provide a valid memorystore instance name. Example: projects/<project number>/locations/<location>/instances/<instance id>"
+            )
+
+        if (
+            gcp_config.host_project_id is not None
+            and gcp_config.host_project_id[0].isdigit()
+        ):
+            # project ID should start with a letter
+            raise click.ClickException(
+                "Please provide a valid host project ID. Note that project ID is not project number, see https://cloud.google.com/resource-manager/docs/creating-managing-projects#before_you_begin for details."
+            )
+
+    def register_gcp_cloud(  # noqa: C901, PLR0912
         self,
         *,
-        region: str,
         name: str,
-        project_id: str,
-        vpc_name: str,
-        subnet_names: List[str],
-        filestore_instance_id: str,
-        filestore_location: str,
-        anyscale_service_account_email: str,
-        instance_service_account_email: str,
-        provider_id: str,
-        firewall_policy_names: List[str],
-        cloud_storage_bucket_name: str,
-        memorystore_instance_name: Optional[str],
-        functional_verify: Optional[str],
-        private_network: bool,
-        cluster_management_stack_version: ClusterManagementStackVersions,
-        host_project_id: Optional[str] = None,
+        cloud_resource: CloudDeployment,
+        functional_verify: Optional[str] = None,
+        cluster_management_stack_version: ClusterManagementStackVersions = ClusterManagementStackVersions.V2,
         yes: bool = False,
         skip_verifications: bool = False,
         auto_add_user: bool = False,
-        # Default to ComputeStack.VM for backwards compatibility
-        # for SDK users who do not specify a compute stack here.
-        compute_stack: ComputeStack = ComputeStack.VM,
-        kubernetes_zones: Optional[List[str]] = None,
-        anyscale_operator_iam_identity: Optional[str] = None,
-        persistent_volume_claim: Optional[str] = None,
-        csi_ephemeral_volume_driver: Optional[str] = None,
     ):
         functions_to_verify = self._validate_functional_verification_args(
             functional_verify
         )
 
-        # Create a cloud without cloud resources first
-        # Provider ID is optional for K8s clouds.
-        if compute_stack != ComputeStack.K8S:
-            provider_id_re_result = re.search(
-                "projects\\/[0-9]*\\/locations\\/global\\/workloadIdentityPools\\/.+\\/providers\\/[a-z0-9-]*$",
-                provider_id,
+        assert cloud_resource.compute_stack
+        assert cloud_resource.gcp_config
+        self._validate_gcp_config(
+            cloud_resource.compute_stack, cloud_resource.gcp_config
+        )
+
+        if not (
+            cloud_resource.object_storage and cloud_resource.object_storage.bucket_name
+        ):
+            raise click.ClickException(
+                "Cloud object storage is required for GCP cloud registration."
+            )
+        if not cloud_resource.object_storage.bucket_name.startswith(GCS_STORAGE_PREFIX):
+            cloud_resource.object_storage.bucket_name = (
+                GCS_STORAGE_PREFIX + cloud_resource.object_storage.bucket_name
             )
-            if provider_id_re_result is None:
-                raise ClickException(
-                    f"Invalid provider_id {provider_id}. Only lowercase letters, numbers, and dashes are allowed."
-                )
 
         self.cloud_event_producer.init_trace_context(
             CloudAnalyticsEventCommandName.REGISTER, CloudProviders.GCP
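The new _validate_gcp_config helper centralizes the format checks that register_gcp_cloud previously did inline. For reference, the resource-name shapes it accepts, taken directly from the regexes and error messages above (values below are placeholders):

    # Workload identity provider, required when the compute stack is not K8S:
    provider_name = (
        "projects/123456789012/locations/global/"
        "workloadIdentityPools/my-pool/providers/my-provider"
    )

    # Optional Memorystore instance name:
    memorystore_instance_name = "projects/123456789012/locations/us-west1/instances/my-redis"

    # Project IDs (and host project IDs) must start with a letter,
    # i.e. a numeric project *number* is rejected.
    project_id = "my-gcp-project"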
@@ -3336,16 +3300,7 @@ class CloudController(BaseController):
         )
 
         try:
-            credentials_dict = {
-                "project_id": project_id or "",
-                "provider_id": provider_id or "",
-                "service_account_email": anyscale_service_account_email or "",
-            }
-            if host_project_id:
-                credentials_dict["host_project_id"] = host_project_id
-            credentials = json.dumps(credentials_dict)
-
-            if compute_stack == ComputeStack.K8S:
+            if cloud_resource.compute_stack == ComputeStack.K8S:
                 # On K8S, we don't need to collect credentials;
                 # instead, write a random trace_id into this field
                 # to maintain the property that each cloud's
@@ -3358,19 +3313,34 @@ class CloudController(BaseController):
                         "service_account_email": random_id,
                     }
                 )
+            else:
+                credentials_dict = {
+                    "project_id": cloud_resource.gcp_config.project_id or "",
+                    "provider_id": cloud_resource.gcp_config.provider_name or "",
+                    "service_account_email": cloud_resource.gcp_config.anyscale_service_account_email
+                    or "",
+                }
+                if cloud_resource.gcp_config.host_project_id:
+                    credentials_dict[
+                        "host_project_id"
+                    ] = cloud_resource.gcp_config.host_project_id
+                credentials = json.dumps(credentials_dict)
 
             # NOTE: For now we set the is_private_service_cloud to be the same as is_private_cloud
             # We don't expose this to the user yet since it's not recommended.
-            is_private_service_cloud = private_network
+            is_private_network = (
+                cloud_resource.networking_mode == NetworkingMode.PRIVATE
+            )
+            is_private_service_cloud = is_private_network
 
             created_cloud = self.api_client.create_cloud_api_v2_clouds_post(
                 write_cloud=WriteCloud(
                     provider="GCP",
-                    region=region,
+                    region=cloud_resource.region,
                     credentials=credentials,
                     name=name,
                     is_bring_your_own_resource=True,
-                    is_private_cloud=private_network,
+                    is_private_cloud=is_private_network,
                     cluster_management_stack_version=cluster_management_stack_version,
                     is_private_service_cloud=is_private_service_cloud,
                     auto_add_user=auto_add_user,
@@ -3390,69 +3360,31 @@ class CloudController(BaseController):
             raise
 
         try:
-            enable_filestore = filestore_location and filestore_instance_id
+            file_storage = cloud_resource.file_storage
+            # Check if filestore is enabled
+            enable_filestore = file_storage and file_storage.file_storage_id
 
             # Normally, for Kubernetes clouds, we don't need a VPC name, since networking is managed by Kubernetes.
             # For Kubernetes clouds on GCP where Filestore is enabled, we require the VPC name, since it is needed
             # to look up the relevant Mount Target IP for Filestore in the VPC.
-            if compute_stack == ComputeStack.K8S:
-                if enable_filestore and not vpc_name:
+            if cloud_resource.compute_stack == ComputeStack.K8S:
+                if enable_filestore and not cloud_resource.gcp_config.vpc_name:
                     raise ClickException(
                         "Please provide the name of the VPC that your Kubernetes cluster is running inside of."
                     )
-                if (enable_filestore or memorystore_instance_name) and not project_id:
-                    raise ClickException("Please provide a project ID.")
-
-            if not cloud_storage_bucket_name.startswith(GCS_STORAGE_PREFIX):
-                cloud_storage_bucket_name = (
-                    GCS_STORAGE_PREFIX + cloud_storage_bucket_name
+                memorystore_instance_name = (
+                    cloud_resource.gcp_config.memorystore_instance_name
                 )
-
-            cloud_resource = CloudDeployment(
-                compute_stack=compute_stack,
-                provider=CloudProviders.GCP,
-                region=region,
-                networking_mode=NetworkingMode.PRIVATE
-                if private_network
-                else NetworkingMode.PUBLIC,
-                object_storage=ObjectStorage(bucket_name=cloud_storage_bucket_name),
-                file_storage=FileStorage(
-                    file_storage_id="projects/{}/locations/{}/instances/{}".format(
-                        project_id, filestore_location, filestore_instance_id
-                    )
-                    if filestore_instance_id
-                    else None,
-                    persistent_volume_claim=persistent_volume_claim,
-                    csi_ephemeral_volume_driver=csi_ephemeral_volume_driver,
-                )
-                if filestore_instance_id
-                or persistent_volume_claim
-                or csi_ephemeral_volume_driver
-                else None,
-                gcp_config=GCPConfig(
-                    project_id=project_id,
-                    host_project_id=host_project_id,
-                    provider_name=provider_id,
-                    vpc_name=vpc_name,
-                    subnet_names=subnet_names,
-                    firewall_policy_names=firewall_policy_names,
-                    anyscale_service_account_email=anyscale_service_account_email,
-                    cluster_service_account_email=instance_service_account_email,
-                    memorystore_instance_name=memorystore_instance_name,
-                ),
-                kubernetes_config=KubernetesConfig(
-                    anyscale_operator_iam_identity=anyscale_operator_iam_identity,
-                    zones=kubernetes_zones,
-                )
-                if compute_stack == ComputeStack.K8S
-                else None,
-            )
+                if (
+                    enable_filestore or memorystore_instance_name
+                ) and not cloud_resource.gcp_config.project_id:
+                    raise ClickException("Please provide a project ID.")
 
             self._preprocess_gcp(cloud_resource)
 
             # Verification is only performed for VM compute stack.
             # TODO (shomilj): Add verification to the K8S compute stack as well.
-            if compute_stack == ComputeStack.VM:
+            if cloud_resource.compute_stack != ComputeStack.K8S:
                 if (
                     not skip_verifications
                     and not self.verify_gcp_cloud_resources_from_cloud_deployment(
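As on AWS, the GCP CloudDeployment construction removed above now happens on the caller's side; the controller keeps the Filestore/VPC and project-ID checks plus the GCS_STORAGE_PREFIX bucket normalization. A hedged sketch of a VM-stack GCP deployment using only field names from the removed block (imports assumed from the models module shown earlier; values are illustrative and not verified against the SDK):

    from anyscale.client.openapi_client.models import (
        CloudDeployment,
        CloudProviders,
        ComputeStack,
        FileStorage,
        GCPConfig,
        NetworkingMode,
        ObjectStorage,
    )

    # Illustrative values only.
    cloud_resource = CloudDeployment(
        compute_stack=ComputeStack.VM,
        provider=CloudProviders.GCP,
        region="us-west1",
        networking_mode=NetworkingMode.PUBLIC,
        object_storage=ObjectStorage(bucket_name="my-anyscale-bucket"),
        file_storage=FileStorage(
            file_storage_id="projects/my-project/locations/us-west1-a/instances/my-filestore",
        ),
        gcp_config=GCPConfig(
            project_id="my-project",
            provider_name=(
                "projects/123456789012/locations/global/"
                "workloadIdentityPools/my-pool/providers/my-provider"
            ),
            vpc_name="my-vpc",
            subnet_names=["my-subnet"],
            firewall_policy_names=["my-firewall-policy"],
            anyscale_service_account_email="anyscale@my-project.iam.gserviceaccount.com",
            cluster_service_account_email="cluster@my-project.iam.gserviceaccount.com",
        ),
    )

    # `controller` stands in for a CloudController instance.
    controller.register_gcp_cloud(name="my-gcp-cloud", cloud_resource=cloud_resource)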
@@ -3500,9 +3432,7 @@ class CloudController(BaseController):
             # For now, only wait for the cloud to be active if the compute stack is VM.
             # TODO (shomilj): support this fully for Kubernetes after provider metadata
             # checks are removed.
-            if compute_stack == ComputeStack.VM:
-                self.wait_for_cloud_to_be_active(cloud_id, CloudProviders.GCP)
-            if compute_stack == ComputeStack.K8S:
+            if cloud_resource.compute_stack == ComputeStack.K8S:
                 # Get the cloud resource ID to pass to the helm command.
                 cloud_resources = self.api_client.get_cloud_resources_api_v2_clouds_cloud_id_resources_get(
                     cloud_id=cloud_id,
@@ -3515,20 +3445,24 @@ class CloudController(BaseController):
                 helm_command = self._generate_helm_upgrade_command(
                     provider="gcp",
                     cloud_deployment_id=cloud_resource_id,
-                    region=region,
-                    operator_iam_identity=anyscale_service_account_email,
+                    region=cloud_resource.region,
+                    operator_iam_identity=cloud_resource.gcp_config.anyscale_service_account_email,
                 )
                 gcloud_command = self._generate_gcp_workload_identity_command(
-                    anyscale_service_account_email=anyscale_service_account_email,
-                    project_id=project_id,
+                    anyscale_service_account_email=cloud_resource.gcp_config.anyscale_service_account_email,
+                    project_id=cloud_resource.gcp_config.project_id,
                     namespace="<namespace>",
                 )
                 self.log.info(
                     f"Cloud registration complete! To install the Anyscale operator, run:\n\n{helm_command}\n\nThen configure workload identity by running:\n\n{gcloud_command}"
                 )
+            else:
+                self.wait_for_cloud_to_be_active(cloud_id, CloudProviders.GCP)
+
             self.cloud_event_producer.produce(
                 CloudAnalyticsEventName.INFRA_SETUP_COMPLETE, succeeded=True
             )
+
         except Exception as e:  # noqa: BLE001
             self.log.error(str(e))
             self.cloud_event_producer.produce(
@@ -3657,7 +3591,7 @@ class CloudController(BaseController):
         )
 
         # Tear down admin zone and mark cloud as deleted
-        with self.log.spinner("Deleting Anyscale cloud..."):
+        with self.log.spinner("Deleting Anyscale cloud (this may take 2-5 minutes)..."):
             try:
                 self.api_client.delete_cloud_api_v2_clouds_cloud_id_delete(
                     cloud_id=cloud_id