pulumi-alicloud 3.77.0a1746163013__py3-none-any.whl → 3.78.0a1746422202__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of pulumi-alicloud might be problematic.

Files changed (83)
  1. pulumi_alicloud/__init__.py +72 -0
  2. pulumi_alicloud/_inputs.py +13 -0
  3. pulumi_alicloud/adb/db_cluster_lake_version.py +94 -0
  4. pulumi_alicloud/alb/_inputs.py +6 -3
  5. pulumi_alicloud/alb/outputs.py +4 -2
  6. pulumi_alicloud/apig/environment.py +2 -2
  7. pulumi_alicloud/apig/http_api.py +2 -2
  8. pulumi_alicloud/arms/grafana_workspace.py +56 -14
  9. pulumi_alicloud/cloudfirewall/instance_member.py +4 -4
  10. pulumi_alicloud/cloudfirewall/vpc_cen_tr_firewall.py +2 -2
  11. pulumi_alicloud/cloudsso/_inputs.py +697 -7
  12. pulumi_alicloud/cloudsso/directory.py +345 -65
  13. pulumi_alicloud/cloudsso/outputs.py +557 -8
  14. pulumi_alicloud/config/outputs.py +8 -0
  15. pulumi_alicloud/cs/_inputs.py +18 -18
  16. pulumi_alicloud/cs/edge_kubernetes.py +136 -100
  17. pulumi_alicloud/cs/get_kubernetes_node_pools.py +21 -1
  18. pulumi_alicloud/cs/kubernetes.py +118 -39
  19. pulumi_alicloud/cs/managed_kubernetes.py +125 -46
  20. pulumi_alicloud/cs/outputs.py +14 -14
  21. pulumi_alicloud/cs/serverless_kubernetes.py +66 -73
  22. pulumi_alicloud/ddos/ddos_coo_instance.py +175 -25
  23. pulumi_alicloud/dns/ddos_coo_instance.py +175 -25
  24. pulumi_alicloud/dts/job_monitor_rule.py +2 -2
  25. pulumi_alicloud/dts/synchronization_job.py +2 -2
  26. pulumi_alicloud/ecs/get_instance_types.py +4 -4
  27. pulumi_alicloud/ecs/instance.py +28 -28
  28. pulumi_alicloud/ecs/outputs.py +2 -2
  29. pulumi_alicloud/ecs/security_group_rule.py +32 -4
  30. pulumi_alicloud/eflo/__init__.py +3 -0
  31. pulumi_alicloud/eflo/_inputs.py +623 -0
  32. pulumi_alicloud/eflo/experiment_plan.py +573 -0
  33. pulumi_alicloud/eflo/experiment_plan_template.py +464 -0
  34. pulumi_alicloud/eflo/outputs.py +476 -0
  35. pulumi_alicloud/eflo/resource.py +388 -0
  36. pulumi_alicloud/ens/disk.py +120 -69
  37. pulumi_alicloud/ens/eip.py +45 -41
  38. pulumi_alicloud/esa/__init__.py +2 -0
  39. pulumi_alicloud/esa/scheduled_preload_execution.py +479 -0
  40. pulumi_alicloud/esa/scheduled_preload_job.py +467 -0
  41. pulumi_alicloud/gwlb/listener.py +2 -2
  42. pulumi_alicloud/gwlb/load_balancer.py +2 -2
  43. pulumi_alicloud/gwlb/server_group.py +2 -2
  44. pulumi_alicloud/ims/__init__.py +2 -0
  45. pulumi_alicloud/ims/get_oidc_providers.py +216 -0
  46. pulumi_alicloud/ims/outputs.py +138 -0
  47. pulumi_alicloud/mongodb/__init__.py +2 -0
  48. pulumi_alicloud/mongodb/_inputs.py +154 -0
  49. pulumi_alicloud/mongodb/instance.py +7 -7
  50. pulumi_alicloud/mongodb/outputs.py +121 -0
  51. pulumi_alicloud/mongodb/public_network_address.py +275 -0
  52. pulumi_alicloud/mongodb/replica_set_role.py +533 -0
  53. pulumi_alicloud/nas/_inputs.py +252 -18
  54. pulumi_alicloud/nas/file_system.py +649 -264
  55. pulumi_alicloud/nas/outputs.py +198 -12
  56. pulumi_alicloud/nlb/server_group_server_attachment.py +4 -0
  57. pulumi_alicloud/pai/__init__.py +1 -0
  58. pulumi_alicloud/pai/flow_pipeline.py +491 -0
  59. pulumi_alicloud/pulumi-plugin.json +1 -1
  60. pulumi_alicloud/ram/__init__.py +1 -0
  61. pulumi_alicloud/ram/get_role_policy_attachments.py +272 -0
  62. pulumi_alicloud/ram/outputs.py +63 -0
  63. pulumi_alicloud/ram/security_preference.py +496 -110
  64. pulumi_alicloud/rdc/organization.py +2 -2
  65. pulumi_alicloud/rds/instance.py +1 -1
  66. pulumi_alicloud/sae/application_scaling_rule.py +2 -2
  67. pulumi_alicloud/sae/ingress.py +2 -2
  68. pulumi_alicloud/schedulerx/app_group.py +2 -2
  69. pulumi_alicloud/schedulerx/job.py +2 -2
  70. pulumi_alicloud/selectdb/db_cluster.py +2 -0
  71. pulumi_alicloud/selectdb/db_instance.py +43 -13
  72. pulumi_alicloud/selectdb/get_db_clusters.py +2 -0
  73. pulumi_alicloud/selectdb/get_db_instances.py +2 -0
  74. pulumi_alicloud/selectdb/outputs.py +3 -3
  75. pulumi_alicloud/sls/__init__.py +1 -0
  76. pulumi_alicloud/sls/_inputs.py +295 -0
  77. pulumi_alicloud/sls/etl.py +516 -0
  78. pulumi_alicloud/sls/outputs.py +209 -0
  79. pulumi_alicloud/vpc/network.py +156 -88
  80. {pulumi_alicloud-3.77.0a1746163013.dist-info → pulumi_alicloud-3.78.0a1746422202.dist-info}/METADATA +1 -1
  81. {pulumi_alicloud-3.77.0a1746163013.dist-info → pulumi_alicloud-3.78.0a1746422202.dist-info}/RECORD +83 -71
  82. {pulumi_alicloud-3.77.0a1746163013.dist-info → pulumi_alicloud-3.78.0a1746422202.dist-info}/WHEEL +1 -1
  83. {pulumi_alicloud-3.77.0a1746163013.dist-info → pulumi_alicloud-3.78.0a1746422202.dist-info}/top_level.txt +0 -0
pulumi_alicloud/eflo/outputs.py

@@ -32,6 +32,8 @@ __all__ = [
     'ClusterNetworksVpdInfo',
     'ClusterNodeGroup',
     'ClusterNodeGroupNode',
+    'ExperimentPlanTemplateTemplatePipeline',
+    'ExperimentPlanTemplateTemplatePipelineEnvParams',
     'NodeGroupIpAllocationPolicy',
     'NodeGroupIpAllocationPolicyBondPolicy',
     'NodeGroupIpAllocationPolicyBondPolicyBond',
@@ -40,6 +42,8 @@ __all__ = [
     'NodeGroupIpAllocationPolicyNodePolicy',
     'NodeGroupIpAllocationPolicyNodePolicyBond',
     'NodeGroupNode',
+    'ResourceMachineTypes',
+    'ResourceUserAccessParam',
     'GetSubnetsSubnetResult',
     'GetVpdsVpdResult',
 ]
@@ -999,6 +1003,250 @@ class ClusterNodeGroupNode(dict):
         return pulumi.get(self, "vswitch_id")


+@pulumi.output_type
+class ExperimentPlanTemplateTemplatePipeline(dict):
+    @staticmethod
+    def __key_warning(key: str):
+        suggest = None
+        if key == "envParams":
+            suggest = "env_params"
+        elif key == "pipelineOrder":
+            suggest = "pipeline_order"
+        elif key == "workloadId":
+            suggest = "workload_id"
+        elif key == "workloadName":
+            suggest = "workload_name"
+        elif key == "settingParams":
+            suggest = "setting_params"
+
+        if suggest:
+            pulumi.log.warn(f"Key '{key}' not found in ExperimentPlanTemplateTemplatePipeline. Access the value via the '{suggest}' property getter instead.")
+
+    def __getitem__(self, key: str) -> Any:
+        ExperimentPlanTemplateTemplatePipeline.__key_warning(key)
+        return super().__getitem__(key)
+
+    def get(self, key: str, default = None) -> Any:
+        ExperimentPlanTemplateTemplatePipeline.__key_warning(key)
+        return super().get(key, default)
+
+    def __init__(__self__, *,
+                 env_params: 'outputs.ExperimentPlanTemplateTemplatePipelineEnvParams',
+                 pipeline_order: builtins.int,
+                 scene: builtins.str,
+                 workload_id: builtins.int,
+                 workload_name: builtins.str,
+                 setting_params: Optional[Mapping[str, builtins.str]] = None):
+        """
+        :param 'ExperimentPlanTemplateTemplatePipelineEnvParamsArgs' env_params: Contains a series of parameters related to the environment. See `env_params` below.
+        :param builtins.int pipeline_order: Indicates the sequence number of the pipeline node.
+        :param builtins.str scene: The use of the template scenario. It can have the following optional parameters:
+               - baseline: benchmark evaluation
+        :param builtins.int workload_id: Used to uniquely identify a specific payload.
+        :param builtins.str workload_name: The name used to represent a specific payload.
+        :param Mapping[str, builtins.str] setting_params: Represents additional parameters for the run.
+        """
+        pulumi.set(__self__, "env_params", env_params)
+        pulumi.set(__self__, "pipeline_order", pipeline_order)
+        pulumi.set(__self__, "scene", scene)
+        pulumi.set(__self__, "workload_id", workload_id)
+        pulumi.set(__self__, "workload_name", workload_name)
+        if setting_params is not None:
+            pulumi.set(__self__, "setting_params", setting_params)
+
+    @property
+    @pulumi.getter(name="envParams")
+    def env_params(self) -> 'outputs.ExperimentPlanTemplateTemplatePipelineEnvParams':
+        """
+        Contains a series of parameters related to the environment. See `env_params` below.
+        """
+        return pulumi.get(self, "env_params")
+
+    @property
+    @pulumi.getter(name="pipelineOrder")
+    def pipeline_order(self) -> builtins.int:
+        """
+        Indicates the sequence number of the pipeline node.
+        """
+        return pulumi.get(self, "pipeline_order")
+
+    @property
+    @pulumi.getter
+    def scene(self) -> builtins.str:
+        """
+        The use of the template scenario. It can have the following optional parameters:
+        - baseline: benchmark evaluation
+        """
+        return pulumi.get(self, "scene")
+
+    @property
+    @pulumi.getter(name="workloadId")
+    def workload_id(self) -> builtins.int:
+        """
+        Used to uniquely identify a specific payload.
+        """
+        return pulumi.get(self, "workload_id")
+
+    @property
+    @pulumi.getter(name="workloadName")
+    def workload_name(self) -> builtins.str:
+        """
+        The name used to represent a specific payload.
+        """
+        return pulumi.get(self, "workload_name")
+
+    @property
+    @pulumi.getter(name="settingParams")
+    def setting_params(self) -> Optional[Mapping[str, builtins.str]]:
+        """
+        Represents additional parameters for the run.
+        """
+        return pulumi.get(self, "setting_params")
+
+
+@pulumi.output_type
+class ExperimentPlanTemplateTemplatePipelineEnvParams(dict):
+    @staticmethod
+    def __key_warning(key: str):
+        suggest = None
+        if key == "cpuPerWorker":
+            suggest = "cpu_per_worker"
+        elif key == "gpuPerWorker":
+            suggest = "gpu_per_worker"
+        elif key == "memoryPerWorker":
+            suggest = "memory_per_worker"
+        elif key == "shareMemory":
+            suggest = "share_memory"
+        elif key == "workerNum":
+            suggest = "worker_num"
+        elif key == "cudaVersion":
+            suggest = "cuda_version"
+        elif key == "gpuDriverVersion":
+            suggest = "gpu_driver_version"
+        elif key == "ncclVersion":
+            suggest = "nccl_version"
+        elif key == "pyTorchVersion":
+            suggest = "py_torch_version"
+
+        if suggest:
+            pulumi.log.warn(f"Key '{key}' not found in ExperimentPlanTemplateTemplatePipelineEnvParams. Access the value via the '{suggest}' property getter instead.")
+
+    def __getitem__(self, key: str) -> Any:
+        ExperimentPlanTemplateTemplatePipelineEnvParams.__key_warning(key)
+        return super().__getitem__(key)
+
+    def get(self, key: str, default = None) -> Any:
+        ExperimentPlanTemplateTemplatePipelineEnvParams.__key_warning(key)
+        return super().get(key, default)
+
+    def __init__(__self__, *,
+                 cpu_per_worker: builtins.int,
+                 gpu_per_worker: builtins.int,
+                 memory_per_worker: builtins.int,
+                 share_memory: builtins.int,
+                 worker_num: builtins.int,
+                 cuda_version: Optional[builtins.str] = None,
+                 gpu_driver_version: Optional[builtins.str] = None,
+                 nccl_version: Optional[builtins.str] = None,
+                 py_torch_version: Optional[builtins.str] = None):
+        """
+        :param builtins.int cpu_per_worker: Number of central processing units (CPUs) allocated. This parameter affects the processing power of the computation, especially in tasks that require a large amount of parallel processing.
+        :param builtins.int gpu_per_worker: Number of graphics processing units (GPUs). GPUs are a key component in deep learning and large-scale data processing, so this parameter is very important for tasks that require graphics-accelerated computing.
+        :param builtins.int memory_per_worker: The amount of memory available. Memory size has an important impact on the performance and stability of the program, especially when dealing with large data sets or high-dimensional data.
+        :param builtins.int share_memory: Shared memory GB allocation.
+        :param builtins.int worker_num: The total number of nodes. This parameter directly affects the parallelism and computing speed of the task, and a higher number of working nodes usually accelerates the completion of the task.
+        :param builtins.str cuda_version: The version of CUDA (Compute Unified Device Architecture) used. CUDA is a parallel computing platform and programming model provided by NVIDIA. A specific version may affect the available GPU functions and performance optimization.
+        :param builtins.str gpu_driver_version: The version of the GPU driver used. Driver version may affect GPU performance and compatibility, so it is important to ensure that the correct version is used.
+        :param builtins.str nccl_version: The NVIDIA Collective Communications Library (NCCL) version used. NCCL is a library for multi-GPU and multi-node communication. This parameter is particularly important for optimizing data transmission in distributed computing.
+        :param builtins.str py_torch_version: The version of the PyTorch framework used. PyTorch is a widely used deep learning library, and differences between versions may affect the performance and functional support of model training and inference.
+        """
+        pulumi.set(__self__, "cpu_per_worker", cpu_per_worker)
+        pulumi.set(__self__, "gpu_per_worker", gpu_per_worker)
+        pulumi.set(__self__, "memory_per_worker", memory_per_worker)
+        pulumi.set(__self__, "share_memory", share_memory)
+        pulumi.set(__self__, "worker_num", worker_num)
+        if cuda_version is not None:
+            pulumi.set(__self__, "cuda_version", cuda_version)
+        if gpu_driver_version is not None:
+            pulumi.set(__self__, "gpu_driver_version", gpu_driver_version)
+        if nccl_version is not None:
+            pulumi.set(__self__, "nccl_version", nccl_version)
+        if py_torch_version is not None:
+            pulumi.set(__self__, "py_torch_version", py_torch_version)
+
+    @property
+    @pulumi.getter(name="cpuPerWorker")
+    def cpu_per_worker(self) -> builtins.int:
+        """
+        Number of central processing units (CPUs) allocated. This parameter affects the processing power of the computation, especially in tasks that require a large amount of parallel processing.
+        """
+        return pulumi.get(self, "cpu_per_worker")
+
+    @property
+    @pulumi.getter(name="gpuPerWorker")
+    def gpu_per_worker(self) -> builtins.int:
+        """
+        Number of graphics processing units (GPUs). GPUs are a key component in deep learning and large-scale data processing, so this parameter is very important for tasks that require graphics-accelerated computing.
+        """
+        return pulumi.get(self, "gpu_per_worker")
+
+    @property
+    @pulumi.getter(name="memoryPerWorker")
+    def memory_per_worker(self) -> builtins.int:
+        """
+        The amount of memory available. Memory size has an important impact on the performance and stability of the program, especially when dealing with large data sets or high-dimensional data.
+        """
+        return pulumi.get(self, "memory_per_worker")
+
+    @property
+    @pulumi.getter(name="shareMemory")
+    def share_memory(self) -> builtins.int:
+        """
+        Shared memory GB allocation.
+        """
+        return pulumi.get(self, "share_memory")
+
+    @property
+    @pulumi.getter(name="workerNum")
+    def worker_num(self) -> builtins.int:
+        """
+        The total number of nodes. This parameter directly affects the parallelism and computing speed of the task, and a higher number of working nodes usually accelerates the completion of the task.
+        """
+        return pulumi.get(self, "worker_num")
+
+    @property
+    @pulumi.getter(name="cudaVersion")
+    def cuda_version(self) -> Optional[builtins.str]:
+        """
+        The version of CUDA (Compute Unified Device Architecture) used. CUDA is a parallel computing platform and programming model provided by NVIDIA. A specific version may affect the available GPU functions and performance optimization.
+        """
+        return pulumi.get(self, "cuda_version")
+
+    @property
+    @pulumi.getter(name="gpuDriverVersion")
+    def gpu_driver_version(self) -> Optional[builtins.str]:
+        """
+        The version of the GPU driver used. Driver version may affect GPU performance and compatibility, so it is important to ensure that the correct version is used.
+        """
+        return pulumi.get(self, "gpu_driver_version")
+
+    @property
+    @pulumi.getter(name="ncclVersion")
+    def nccl_version(self) -> Optional[builtins.str]:
+        """
+        The NVIDIA Collective Communications Library (NCCL) version used. NCCL is a library for multi-GPU and multi-node communication. This parameter is particularly important for optimizing data transmission in distributed computing.
+        """
+        return pulumi.get(self, "nccl_version")
+
+    @property
+    @pulumi.getter(name="pyTorchVersion")
+    def py_torch_version(self) -> Optional[builtins.str]:
+        """
+        The version of the PyTorch framework used. PyTorch is a widely used deep learning library, and differences between versions may affect the performance and functional support of model training and inference.
+        """
+        return pulumi.get(self, "py_torch_version")
+
+
 @pulumi.output_type
 class NodeGroupIpAllocationPolicy(dict):
     @staticmethod
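The two output types above back the pipeline block of the new `alicloud.eflo.ExperimentPlanTemplate` resource (added in `pulumi_alicloud/eflo/experiment_plan_template.py`, see the file list). The sketch below is illustrative only: the top-level argument names (`template_name`, `template_pipeline`) are assumptions inferred from the generated type names and are not confirmed by this diff.

    # Hypothetical usage sketch; top-level argument names are assumed, not verified against the provider docs.
    import pulumi_alicloud as alicloud

    template = alicloud.eflo.ExperimentPlanTemplate(
        "example",
        template_name="example-template",  # assumed argument name
        template_pipeline=[{               # assumed; the schema may expose this as singular or plural
            "pipeline_order": 1,
            "scene": "baseline",
            "workload_id": 1,
            "workload_name": "MatMul",
            "env_params": {                # keys taken from ExperimentPlanTemplateTemplatePipelineEnvParams above
                "cpu_per_worker": 90,
                "gpu_per_worker": 8,
                "memory_per_worker": 500,
                "share_memory": 500,
                "worker_num": 1,
                "py_torch_version": "1.0",
            },
        }],
    )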
@@ -1386,6 +1634,234 @@ class NodeGroupNode(dict):
         return pulumi.get(self, "vswitch_id")


+@pulumi.output_type
+class ResourceMachineTypes(dict):
+    @staticmethod
+    def __key_warning(key: str):
+        suggest = None
+        if key == "cpuInfo":
+            suggest = "cpu_info"
+        elif key == "gpuInfo":
+            suggest = "gpu_info"
+        elif key == "bondNum":
+            suggest = "bond_num"
+        elif key == "diskInfo":
+            suggest = "disk_info"
+        elif key == "memoryInfo":
+            suggest = "memory_info"
+        elif key == "networkInfo":
+            suggest = "network_info"
+        elif key == "networkMode":
+            suggest = "network_mode"
+        elif key == "nodeCount":
+            suggest = "node_count"
+
+        if suggest:
+            pulumi.log.warn(f"Key '{key}' not found in ResourceMachineTypes. Access the value via the '{suggest}' property getter instead.")
+
+    def __getitem__(self, key: str) -> Any:
+        ResourceMachineTypes.__key_warning(key)
+        return super().__getitem__(key)
+
+    def get(self, key: str, default = None) -> Any:
+        ResourceMachineTypes.__key_warning(key)
+        return super().get(key, default)
+
+    def __init__(__self__, *,
+                 cpu_info: builtins.str,
+                 gpu_info: builtins.str,
+                 bond_num: Optional[builtins.int] = None,
+                 disk_info: Optional[builtins.str] = None,
+                 memory_info: Optional[builtins.str] = None,
+                 name: Optional[builtins.str] = None,
+                 network_info: Optional[builtins.str] = None,
+                 network_mode: Optional[builtins.str] = None,
+                 node_count: Optional[builtins.int] = None,
+                 type: Optional[builtins.str] = None):
+        """
+        :param builtins.str cpu_info: Provides CPU details, including the number of cores, number of threads, clock frequency, and architecture type. This information helps to evaluate the processing power and identify whether it can meet the performance requirements of a particular application.
+        :param builtins.str gpu_info: Provides detailed information about the GPU, including the number, model, memory size, and computing capability. This information is particularly important for tasks such as deep learning, scientific computing, and graph processing, helping users understand the graphics processing capabilities of nodes.
+        :param builtins.int bond_num: This property specifies the number of network bindings, which relates to the number of physical or virtual network cards connected to the network through the network interface card (NIC). Multiple network bindings can increase bandwidth and redundancy and improve network reliability.
+        :param builtins.str disk_info: Displays information about the storage device, including the disk type (such as SSD or HDD), capacity, and I/O performance. Storage performance is critical in data-intensive applications such as big data processing and databases.
+        :param builtins.str memory_info: This property provides memory details, including total memory, available memory, and usage. This helps users understand the memory processing capabilities of compute nodes, especially when running heavy-duty applications.
+        :param builtins.str name: Specification Name.
+        :param builtins.str network_info: Contains detailed information about the network interface, such as network bandwidth, latency, protocol types supported by the network, IP addresses, and network topology. Optimizing network information is essential to ensure efficient data transmission and low latency.
+        :param builtins.str network_mode: Specifies the network mode, such as bridge mode, NAT mode, or direct connection mode. Different network modes affect the network configuration and data transmission performance of nodes, and affect the network access methods of computing instances.
+        :param builtins.int node_count: Specifies the total number of compute nodes. This property is particularly important in distributed computing and cluster environments, because the number of nodes often directly affects the computing power and the ability to process in parallel.
+        :param builtins.str type: Usually refers to a specific resource type (such as virtual machine, physical server, or container), which is used to distinguish different computing units or resource categories.
+        """
+        pulumi.set(__self__, "cpu_info", cpu_info)
+        pulumi.set(__self__, "gpu_info", gpu_info)
+        if bond_num is not None:
+            pulumi.set(__self__, "bond_num", bond_num)
+        if disk_info is not None:
+            pulumi.set(__self__, "disk_info", disk_info)
+        if memory_info is not None:
+            pulumi.set(__self__, "memory_info", memory_info)
+        if name is not None:
+            pulumi.set(__self__, "name", name)
+        if network_info is not None:
+            pulumi.set(__self__, "network_info", network_info)
+        if network_mode is not None:
+            pulumi.set(__self__, "network_mode", network_mode)
+        if node_count is not None:
+            pulumi.set(__self__, "node_count", node_count)
+        if type is not None:
+            pulumi.set(__self__, "type", type)
+
+    @property
+    @pulumi.getter(name="cpuInfo")
+    def cpu_info(self) -> builtins.str:
+        """
+        Provides CPU details, including the number of cores, number of threads, clock frequency, and architecture type. This information helps to evaluate the processing power and identify whether it can meet the performance requirements of a particular application.
+        """
+        return pulumi.get(self, "cpu_info")
+
+    @property
+    @pulumi.getter(name="gpuInfo")
+    def gpu_info(self) -> builtins.str:
+        """
+        Provides detailed information about the GPU, including the number, model, memory size, and computing capability. This information is particularly important for tasks such as deep learning, scientific computing, and graph processing, helping users understand the graphics processing capabilities of nodes.
+        """
+        return pulumi.get(self, "gpu_info")
+
+    @property
+    @pulumi.getter(name="bondNum")
+    def bond_num(self) -> Optional[builtins.int]:
+        """
+        This property specifies the number of network bindings, which relates to the number of physical or virtual network cards connected to the network through the network interface card (NIC). Multiple network bindings can increase bandwidth and redundancy and improve network reliability.
+        """
+        return pulumi.get(self, "bond_num")
+
+    @property
+    @pulumi.getter(name="diskInfo")
+    def disk_info(self) -> Optional[builtins.str]:
+        """
+        Displays information about the storage device, including the disk type (such as SSD or HDD), capacity, and I/O performance. Storage performance is critical in data-intensive applications such as big data processing and databases.
+        """
+        return pulumi.get(self, "disk_info")
+
+    @property
+    @pulumi.getter(name="memoryInfo")
+    def memory_info(self) -> Optional[builtins.str]:
+        """
+        This property provides memory details, including total memory, available memory, and usage. This helps users understand the memory processing capabilities of compute nodes, especially when running heavy-duty applications.
+        """
+        return pulumi.get(self, "memory_info")
+
+    @property
+    @pulumi.getter
+    def name(self) -> Optional[builtins.str]:
+        """
+        Specification Name.
+        """
+        return pulumi.get(self, "name")
+
+    @property
+    @pulumi.getter(name="networkInfo")
+    def network_info(self) -> Optional[builtins.str]:
+        """
+        Contains detailed information about the network interface, such as network bandwidth, latency, protocol types supported by the network, IP addresses, and network topology. Optimizing network information is essential to ensure efficient data transmission and low latency.
+        """
+        return pulumi.get(self, "network_info")
+
+    @property
+    @pulumi.getter(name="networkMode")
+    def network_mode(self) -> Optional[builtins.str]:
+        """
+        Specifies the network mode, such as bridge mode, NAT mode, or direct connection mode. Different network modes affect the network configuration and data transmission performance of nodes, and affect the network access methods of computing instances.
+        """
+        return pulumi.get(self, "network_mode")
+
+    @property
+    @pulumi.getter(name="nodeCount")
+    def node_count(self) -> Optional[builtins.int]:
+        """
+        Specifies the total number of compute nodes. This property is particularly important in distributed computing and cluster environments, because the number of nodes often directly affects the computing power and the ability to process in parallel.
+        """
+        return pulumi.get(self, "node_count")
+
+    @property
+    @pulumi.getter
+    def type(self) -> Optional[builtins.str]:
+        """
+        Usually refers to a specific resource type (such as virtual machine, physical server, or container), which is used to distinguish different computing units or resource categories.
+        """
+        return pulumi.get(self, "type")
+
+
+@pulumi.output_type
+class ResourceUserAccessParam(dict):
+    @staticmethod
+    def __key_warning(key: str):
+        suggest = None
+        if key == "accessId":
+            suggest = "access_id"
+        elif key == "accessKey":
+            suggest = "access_key"
+        elif key == "workspaceId":
+            suggest = "workspace_id"
+
+        if suggest:
+            pulumi.log.warn(f"Key '{key}' not found in ResourceUserAccessParam. Access the value via the '{suggest}' property getter instead.")
+
+    def __getitem__(self, key: str) -> Any:
+        ResourceUserAccessParam.__key_warning(key)
+        return super().__getitem__(key)
+
+    def get(self, key: str, default = None) -> Any:
+        ResourceUserAccessParam.__key_warning(key)
+        return super().get(key, default)
+
+    def __init__(__self__, *,
+                 access_id: builtins.str,
+                 access_key: builtins.str,
+                 endpoint: builtins.str,
+                 workspace_id: builtins.str):
+        """
+        :param builtins.str access_id: Access keys are important credentials for authentication.
+        :param builtins.str access_key: A Secret Key is a secret credential paired with an access key to verify a user's identity and protect the security of an interface.
+        :param builtins.str endpoint: An Endpoint is a network address for accessing a service or API, usually a URL to a specific service instance.
+        :param builtins.str workspace_id: A Workspace generally refers to a separate space created by a user on a particular computing environment or platform.
+        """
+        pulumi.set(__self__, "access_id", access_id)
+        pulumi.set(__self__, "access_key", access_key)
+        pulumi.set(__self__, "endpoint", endpoint)
+        pulumi.set(__self__, "workspace_id", workspace_id)
+
+    @property
+    @pulumi.getter(name="accessId")
+    def access_id(self) -> builtins.str:
+        """
+        Access keys are important credentials for authentication.
+        """
+        return pulumi.get(self, "access_id")
+
+    @property
+    @pulumi.getter(name="accessKey")
+    def access_key(self) -> builtins.str:
+        """
+        A Secret Key is a secret credential paired with an access key to verify a user's identity and protect the security of an interface.
+        """
+        return pulumi.get(self, "access_key")
+
+    @property
+    @pulumi.getter
+    def endpoint(self) -> builtins.str:
+        """
+        An Endpoint is a network address for accessing a service or API, usually a URL to a specific service instance.
+        """
+        return pulumi.get(self, "endpoint")
+
+    @property
+    @pulumi.getter(name="workspaceId")
+    def workspace_id(self) -> builtins.str:
+        """
+        A Workspace generally refers to a separate space created by a user on a particular computing environment or platform.
+        """
+        return pulumi.get(self, "workspace_id")
+
+
 @pulumi.output_type
 class GetSubnetsSubnetResult(dict):
     def __init__(__self__, *,
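Similarly, ResourceMachineTypes and ResourceUserAccessParam back the machine-type and user-access blocks of the new `alicloud.eflo.Resource` resource (added in `pulumi_alicloud/eflo/resource.py`, see the file list). The sketch below is illustrative only: the top-level argument names (`cluster_id`, `cluster_name`, `machine_types`, `user_access_param`) are assumptions inferred from the generated output types, not confirmed by this diff, and the string values are placeholders.

    # Hypothetical usage sketch; top-level argument names are assumed, not verified against the provider docs.
    import pulumi_alicloud as alicloud

    eflo_resource = alicloud.eflo.Resource(
        "example",
        cluster_id="eflo-cluster-example",   # assumed argument name
        cluster_name="example-cluster",      # assumed argument name
        machine_types={                      # assumed; may be a single block or a list in the schema
            "cpu_info": "2x 48-core CPU (placeholder)",
            "gpu_info": "8x 80GB GPU (placeholder)",
            "name": "example-spec",
            "node_count": 1,
        },
        user_access_param={                  # keys taken from ResourceUserAccessParam above
            "access_id": "example-access-id",
            "access_key": "example-access-key",
            "endpoint": "https://example.endpoint",
            "workspace_id": "example-workspace",
        },
    )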