pulumi-alicloud 3.77.0a1746076596__py3-none-any.whl → 3.77.0a1746220593__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of pulumi-alicloud might be problematic. Click here for more details.
- pulumi_alicloud/__init__.py +72 -0
- pulumi_alicloud/_inputs.py +13 -0
- pulumi_alicloud/adb/db_cluster_lake_version.py +94 -0
- pulumi_alicloud/alb/_inputs.py +6 -3
- pulumi_alicloud/alb/outputs.py +4 -2
- pulumi_alicloud/apig/environment.py +2 -2
- pulumi_alicloud/apig/http_api.py +2 -2
- pulumi_alicloud/arms/grafana_workspace.py +56 -14
- pulumi_alicloud/cloudfirewall/instance_member.py +4 -4
- pulumi_alicloud/cloudfirewall/vpc_cen_tr_firewall.py +2 -2
- pulumi_alicloud/cloudsso/_inputs.py +697 -7
- pulumi_alicloud/cloudsso/directory.py +345 -65
- pulumi_alicloud/cloudsso/outputs.py +557 -8
- pulumi_alicloud/config/outputs.py +8 -0
- pulumi_alicloud/cs/_inputs.py +18 -18
- pulumi_alicloud/cs/edge_kubernetes.py +136 -100
- pulumi_alicloud/cs/get_kubernetes_node_pools.py +21 -1
- pulumi_alicloud/cs/kubernetes.py +118 -39
- pulumi_alicloud/cs/managed_kubernetes.py +125 -46
- pulumi_alicloud/cs/outputs.py +14 -14
- pulumi_alicloud/cs/serverless_kubernetes.py +66 -73
- pulumi_alicloud/ddos/ddos_coo_instance.py +175 -25
- pulumi_alicloud/dns/ddos_coo_instance.py +175 -25
- pulumi_alicloud/dts/job_monitor_rule.py +2 -2
- pulumi_alicloud/dts/synchronization_job.py +2 -2
- pulumi_alicloud/ecs/get_instance_types.py +4 -4
- pulumi_alicloud/ecs/instance.py +28 -28
- pulumi_alicloud/ecs/outputs.py +2 -2
- pulumi_alicloud/ecs/security_group_rule.py +32 -4
- pulumi_alicloud/eflo/__init__.py +3 -0
- pulumi_alicloud/eflo/_inputs.py +623 -0
- pulumi_alicloud/eflo/experiment_plan.py +573 -0
- pulumi_alicloud/eflo/experiment_plan_template.py +464 -0
- pulumi_alicloud/eflo/outputs.py +476 -0
- pulumi_alicloud/eflo/resource.py +388 -0
- pulumi_alicloud/ens/disk.py +120 -69
- pulumi_alicloud/ens/eip.py +45 -41
- pulumi_alicloud/esa/__init__.py +2 -0
- pulumi_alicloud/esa/scheduled_preload_execution.py +479 -0
- pulumi_alicloud/esa/scheduled_preload_job.py +467 -0
- pulumi_alicloud/gwlb/listener.py +2 -2
- pulumi_alicloud/gwlb/load_balancer.py +2 -2
- pulumi_alicloud/gwlb/server_group.py +2 -2
- pulumi_alicloud/ims/__init__.py +2 -0
- pulumi_alicloud/ims/get_oidc_providers.py +216 -0
- pulumi_alicloud/ims/outputs.py +138 -0
- pulumi_alicloud/mongodb/__init__.py +2 -0
- pulumi_alicloud/mongodb/_inputs.py +154 -0
- pulumi_alicloud/mongodb/instance.py +7 -7
- pulumi_alicloud/mongodb/outputs.py +121 -0
- pulumi_alicloud/mongodb/public_network_address.py +275 -0
- pulumi_alicloud/mongodb/replica_set_role.py +533 -0
- pulumi_alicloud/nas/_inputs.py +252 -18
- pulumi_alicloud/nas/file_system.py +649 -264
- pulumi_alicloud/nas/outputs.py +198 -12
- pulumi_alicloud/nlb/server_group_server_attachment.py +4 -0
- pulumi_alicloud/pai/__init__.py +1 -0
- pulumi_alicloud/pai/flow_pipeline.py +491 -0
- pulumi_alicloud/pulumi-plugin.json +1 -1
- pulumi_alicloud/ram/__init__.py +1 -0
- pulumi_alicloud/ram/get_role_policy_attachments.py +272 -0
- pulumi_alicloud/ram/outputs.py +63 -0
- pulumi_alicloud/ram/security_preference.py +496 -110
- pulumi_alicloud/rdc/organization.py +2 -2
- pulumi_alicloud/rds/instance.py +1 -1
- pulumi_alicloud/sae/application_scaling_rule.py +2 -2
- pulumi_alicloud/sae/ingress.py +2 -2
- pulumi_alicloud/schedulerx/app_group.py +2 -2
- pulumi_alicloud/schedulerx/job.py +2 -2
- pulumi_alicloud/selectdb/db_cluster.py +2 -0
- pulumi_alicloud/selectdb/db_instance.py +43 -13
- pulumi_alicloud/selectdb/get_db_clusters.py +2 -0
- pulumi_alicloud/selectdb/get_db_instances.py +2 -0
- pulumi_alicloud/selectdb/outputs.py +3 -3
- pulumi_alicloud/sls/__init__.py +1 -0
- pulumi_alicloud/sls/_inputs.py +295 -0
- pulumi_alicloud/sls/etl.py +516 -0
- pulumi_alicloud/sls/outputs.py +209 -0
- pulumi_alicloud/vpc/network.py +156 -88
- {pulumi_alicloud-3.77.0a1746076596.dist-info → pulumi_alicloud-3.77.0a1746220593.dist-info}/METADATA +1 -1
- {pulumi_alicloud-3.77.0a1746076596.dist-info → pulumi_alicloud-3.77.0a1746220593.dist-info}/RECORD +83 -71
- {pulumi_alicloud-3.77.0a1746076596.dist-info → pulumi_alicloud-3.77.0a1746220593.dist-info}/WHEEL +0 -0
- {pulumi_alicloud-3.77.0a1746076596.dist-info → pulumi_alicloud-3.77.0a1746220593.dist-info}/top_level.txt +0 -0
pulumi_alicloud/eflo/_inputs.py
CHANGED
|
@@ -46,6 +46,10 @@ __all__ = [
|
|
|
46
46
|
'ClusterNodeGroupArgsDict',
|
|
47
47
|
'ClusterNodeGroupNodeArgs',
|
|
48
48
|
'ClusterNodeGroupNodeArgsDict',
|
|
49
|
+
'ExperimentPlanTemplateTemplatePipelineArgs',
|
|
50
|
+
'ExperimentPlanTemplateTemplatePipelineArgsDict',
|
|
51
|
+
'ExperimentPlanTemplateTemplatePipelineEnvParamsArgs',
|
|
52
|
+
'ExperimentPlanTemplateTemplatePipelineEnvParamsArgsDict',
|
|
49
53
|
'NodeGroupIpAllocationPolicyArgs',
|
|
50
54
|
'NodeGroupIpAllocationPolicyArgsDict',
|
|
51
55
|
'NodeGroupIpAllocationPolicyBondPolicyArgs',
|
|
@@ -62,6 +66,10 @@ __all__ = [
|
|
|
62
66
|
'NodeGroupIpAllocationPolicyNodePolicyBondArgsDict',
|
|
63
67
|
'NodeGroupNodeArgs',
|
|
64
68
|
'NodeGroupNodeArgsDict',
|
|
69
|
+
'ResourceMachineTypesArgs',
|
|
70
|
+
'ResourceMachineTypesArgsDict',
|
|
71
|
+
'ResourceUserAccessParamArgs',
|
|
72
|
+
'ResourceUserAccessParamArgsDict',
|
|
65
73
|
]
|
|
66
74
|
|
|
67
75
|
MYPY = False
|
|
@@ -1227,6 +1235,323 @@ class ClusterNodeGroupNodeArgs:
|
|
|
1227
1235
|
pulumi.set(self, "vswitch_id", value)
|
|
1228
1236
|
|
|
1229
1237
|
|
|
1238
|
+
if not MYPY:
|
|
1239
|
+
class ExperimentPlanTemplateTemplatePipelineArgsDict(TypedDict):
|
|
1240
|
+
env_params: pulumi.Input['ExperimentPlanTemplateTemplatePipelineEnvParamsArgsDict']
|
|
1241
|
+
"""
|
|
1242
|
+
Contains a series of parameters related to the environment. See `env_params` below.
|
|
1243
|
+
"""
|
|
1244
|
+
pipeline_order: pulumi.Input[builtins.int]
|
|
1245
|
+
"""
|
|
1246
|
+
Indicates the sequence number of the pipeline node.
|
|
1247
|
+
"""
|
|
1248
|
+
scene: pulumi.Input[builtins.str]
|
|
1249
|
+
"""
|
|
1250
|
+
The use of the template scenario. It can have the following optional parameters:
|
|
1251
|
+
- baseline: benchmark evaluation
|
|
1252
|
+
"""
|
|
1253
|
+
workload_id: pulumi.Input[builtins.int]
|
|
1254
|
+
"""
|
|
1255
|
+
Used to uniquely identify a specific payload.
|
|
1256
|
+
"""
|
|
1257
|
+
workload_name: pulumi.Input[builtins.str]
|
|
1258
|
+
"""
|
|
1259
|
+
The name used to represent a specific payload.
|
|
1260
|
+
"""
|
|
1261
|
+
setting_params: NotRequired[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]]
|
|
1262
|
+
"""
|
|
1263
|
+
Represents additional parameters for the run.
|
|
1264
|
+
"""
|
|
1265
|
+
elif False:
|
|
1266
|
+
ExperimentPlanTemplateTemplatePipelineArgsDict: TypeAlias = Mapping[str, Any]
|
|
1267
|
+
|
|
1268
|
+
@pulumi.input_type
|
|
1269
|
+
class ExperimentPlanTemplateTemplatePipelineArgs:
|
|
1270
|
+
def __init__(__self__, *,
|
|
1271
|
+
env_params: pulumi.Input['ExperimentPlanTemplateTemplatePipelineEnvParamsArgs'],
|
|
1272
|
+
pipeline_order: pulumi.Input[builtins.int],
|
|
1273
|
+
scene: pulumi.Input[builtins.str],
|
|
1274
|
+
workload_id: pulumi.Input[builtins.int],
|
|
1275
|
+
workload_name: pulumi.Input[builtins.str],
|
|
1276
|
+
setting_params: Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]] = None):
|
|
1277
|
+
"""
|
|
1278
|
+
:param pulumi.Input['ExperimentPlanTemplateTemplatePipelineEnvParamsArgs'] env_params: Contains a series of parameters related to the environment. See `env_params` below.
|
|
1279
|
+
:param pulumi.Input[builtins.int] pipeline_order: Indicates the sequence number of the pipeline node.
|
|
1280
|
+
:param pulumi.Input[builtins.str] scene: The use of the template scenario. It can have the following optional parameters:
|
|
1281
|
+
- baseline: benchmark evaluation
|
|
1282
|
+
:param pulumi.Input[builtins.int] workload_id: Used to uniquely identify a specific payload.
|
|
1283
|
+
:param pulumi.Input[builtins.str] workload_name: The name used to represent a specific payload.
|
|
1284
|
+
:param pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]] setting_params: Represents additional parameters for the run.
|
|
1285
|
+
"""
|
|
1286
|
+
pulumi.set(__self__, "env_params", env_params)
|
|
1287
|
+
pulumi.set(__self__, "pipeline_order", pipeline_order)
|
|
1288
|
+
pulumi.set(__self__, "scene", scene)
|
|
1289
|
+
pulumi.set(__self__, "workload_id", workload_id)
|
|
1290
|
+
pulumi.set(__self__, "workload_name", workload_name)
|
|
1291
|
+
if setting_params is not None:
|
|
1292
|
+
pulumi.set(__self__, "setting_params", setting_params)
|
|
1293
|
+
|
|
1294
|
+
@property
|
|
1295
|
+
@pulumi.getter(name="envParams")
|
|
1296
|
+
def env_params(self) -> pulumi.Input['ExperimentPlanTemplateTemplatePipelineEnvParamsArgs']:
|
|
1297
|
+
"""
|
|
1298
|
+
Contains a series of parameters related to the environment. See `env_params` below.
|
|
1299
|
+
"""
|
|
1300
|
+
return pulumi.get(self, "env_params")
|
|
1301
|
+
|
|
1302
|
+
@env_params.setter
|
|
1303
|
+
def env_params(self, value: pulumi.Input['ExperimentPlanTemplateTemplatePipelineEnvParamsArgs']):
|
|
1304
|
+
pulumi.set(self, "env_params", value)
|
|
1305
|
+
|
|
1306
|
+
@property
|
|
1307
|
+
@pulumi.getter(name="pipelineOrder")
|
|
1308
|
+
def pipeline_order(self) -> pulumi.Input[builtins.int]:
|
|
1309
|
+
"""
|
|
1310
|
+
Indicates the sequence number of the pipeline node.
|
|
1311
|
+
"""
|
|
1312
|
+
return pulumi.get(self, "pipeline_order")
|
|
1313
|
+
|
|
1314
|
+
@pipeline_order.setter
|
|
1315
|
+
def pipeline_order(self, value: pulumi.Input[builtins.int]):
|
|
1316
|
+
pulumi.set(self, "pipeline_order", value)
|
|
1317
|
+
|
|
1318
|
+
@property
|
|
1319
|
+
@pulumi.getter
|
|
1320
|
+
def scene(self) -> pulumi.Input[builtins.str]:
|
|
1321
|
+
"""
|
|
1322
|
+
The use of the template scenario. It can have the following optional parameters:
|
|
1323
|
+
- baseline: benchmark evaluation
|
|
1324
|
+
"""
|
|
1325
|
+
return pulumi.get(self, "scene")
|
|
1326
|
+
|
|
1327
|
+
@scene.setter
|
|
1328
|
+
def scene(self, value: pulumi.Input[builtins.str]):
|
|
1329
|
+
pulumi.set(self, "scene", value)
|
|
1330
|
+
|
|
1331
|
+
@property
|
|
1332
|
+
@pulumi.getter(name="workloadId")
|
|
1333
|
+
def workload_id(self) -> pulumi.Input[builtins.int]:
|
|
1334
|
+
"""
|
|
1335
|
+
Used to uniquely identify a specific payload.
|
|
1336
|
+
"""
|
|
1337
|
+
return pulumi.get(self, "workload_id")
|
|
1338
|
+
|
|
1339
|
+
@workload_id.setter
|
|
1340
|
+
def workload_id(self, value: pulumi.Input[builtins.int]):
|
|
1341
|
+
pulumi.set(self, "workload_id", value)
|
|
1342
|
+
|
|
1343
|
+
@property
|
|
1344
|
+
@pulumi.getter(name="workloadName")
|
|
1345
|
+
def workload_name(self) -> pulumi.Input[builtins.str]:
|
|
1346
|
+
"""
|
|
1347
|
+
The name used to represent a specific payload.
|
|
1348
|
+
"""
|
|
1349
|
+
return pulumi.get(self, "workload_name")
|
|
1350
|
+
|
|
1351
|
+
@workload_name.setter
|
|
1352
|
+
def workload_name(self, value: pulumi.Input[builtins.str]):
|
|
1353
|
+
pulumi.set(self, "workload_name", value)
|
|
1354
|
+
|
|
1355
|
+
@property
|
|
1356
|
+
@pulumi.getter(name="settingParams")
|
|
1357
|
+
def setting_params(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]]:
|
|
1358
|
+
"""
|
|
1359
|
+
Represents additional parameters for the run.
|
|
1360
|
+
"""
|
|
1361
|
+
return pulumi.get(self, "setting_params")
|
|
1362
|
+
|
|
1363
|
+
@setting_params.setter
|
|
1364
|
+
def setting_params(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]]):
|
|
1365
|
+
pulumi.set(self, "setting_params", value)
|
|
1366
|
+
|
|
1367
|
+
|
|
1368
|
+
if not MYPY:
|
|
1369
|
+
class ExperimentPlanTemplateTemplatePipelineEnvParamsArgsDict(TypedDict):
|
|
1370
|
+
cpu_per_worker: pulumi.Input[builtins.int]
|
|
1371
|
+
"""
|
|
1372
|
+
Number of central processing units (CPUs) allocated. This parameter affects the processing power of the computation, especially in tasks that require a large amount of parallel processing.
|
|
1373
|
+
"""
|
|
1374
|
+
gpu_per_worker: pulumi.Input[builtins.int]
|
|
1375
|
+
"""
|
|
1376
|
+
Number of graphics processing units (GPUs). GPUs are a key component in deep learning and large-scale data processing, so this parameter is very important for tasks that require graphics-accelerated computing.
|
|
1377
|
+
"""
|
|
1378
|
+
memory_per_worker: pulumi.Input[builtins.int]
|
|
1379
|
+
"""
|
|
1380
|
+
The amount of memory available. Memory size has an important impact on the performance and stability of the program, especially when dealing with large data sets or high-dimensional data.
|
|
1381
|
+
"""
|
|
1382
|
+
share_memory: pulumi.Input[builtins.int]
|
|
1383
|
+
"""
|
|
1384
|
+
Shared memory GB allocation
|
|
1385
|
+
"""
|
|
1386
|
+
worker_num: pulumi.Input[builtins.int]
|
|
1387
|
+
"""
|
|
1388
|
+
The total number of nodes. This parameter directly affects the parallelism and computing speed of the task, and a higher number of working nodes usually accelerates the completion of the task.
|
|
1389
|
+
"""
|
|
1390
|
+
cuda_version: NotRequired[pulumi.Input[builtins.str]]
|
|
1391
|
+
"""
|
|
1392
|
+
The version of CUDA(Compute Unified Device Architecture) used. CUDA is a parallel computing platform and programming model provided by NVIDIA. A specific version may affect the available GPU functions and performance optimization.
|
|
1393
|
+
"""
|
|
1394
|
+
gpu_driver_version: NotRequired[pulumi.Input[builtins.str]]
|
|
1395
|
+
"""
|
|
1396
|
+
The version of the GPU driver used. Driver version may affect GPU performance and compatibility, so it is important to ensure that the correct version is used
|
|
1397
|
+
"""
|
|
1398
|
+
nccl_version: NotRequired[pulumi.Input[builtins.str]]
|
|
1399
|
+
"""
|
|
1400
|
+
The NVIDIA Collective Communications Library(NCCL) version used. NCCL is a library for multi-GPU and multi-node communication. This parameter is particularly important for optimizing data transmission in distributed computing.
|
|
1401
|
+
"""
|
|
1402
|
+
py_torch_version: NotRequired[pulumi.Input[builtins.str]]
|
|
1403
|
+
"""
|
|
1404
|
+
The version of the PyTorch framework used. PyTorch is a widely used deep learning library, and differences between versions may affect the performance and functional support of model training and inference.
|
|
1405
|
+
"""
|
|
1406
|
+
elif False:
|
|
1407
|
+
ExperimentPlanTemplateTemplatePipelineEnvParamsArgsDict: TypeAlias = Mapping[str, Any]
|
|
1408
|
+
|
|
1409
|
+
@pulumi.input_type
|
|
1410
|
+
class ExperimentPlanTemplateTemplatePipelineEnvParamsArgs:
|
|
1411
|
+
def __init__(__self__, *,
|
|
1412
|
+
cpu_per_worker: pulumi.Input[builtins.int],
|
|
1413
|
+
gpu_per_worker: pulumi.Input[builtins.int],
|
|
1414
|
+
memory_per_worker: pulumi.Input[builtins.int],
|
|
1415
|
+
share_memory: pulumi.Input[builtins.int],
|
|
1416
|
+
worker_num: pulumi.Input[builtins.int],
|
|
1417
|
+
cuda_version: Optional[pulumi.Input[builtins.str]] = None,
|
|
1418
|
+
gpu_driver_version: Optional[pulumi.Input[builtins.str]] = None,
|
|
1419
|
+
nccl_version: Optional[pulumi.Input[builtins.str]] = None,
|
|
1420
|
+
py_torch_version: Optional[pulumi.Input[builtins.str]] = None):
|
|
1421
|
+
"""
|
|
1422
|
+
:param pulumi.Input[builtins.int] cpu_per_worker: Number of central processing units (CPUs) allocated. This parameter affects the processing power of the computation, especially in tasks that require a large amount of parallel processing.
|
|
1423
|
+
:param pulumi.Input[builtins.int] gpu_per_worker: Number of graphics processing units (GPUs). GPUs are a key component in deep learning and large-scale data processing, so this parameter is very important for tasks that require graphics-accelerated computing.
|
|
1424
|
+
:param pulumi.Input[builtins.int] memory_per_worker: The amount of memory available. Memory size has an important impact on the performance and stability of the program, especially when dealing with large data sets or high-dimensional data.
|
|
1425
|
+
:param pulumi.Input[builtins.int] share_memory: Shared memory GB allocation
|
|
1426
|
+
:param pulumi.Input[builtins.int] worker_num: The total number of nodes. This parameter directly affects the parallelism and computing speed of the task, and a higher number of working nodes usually accelerates the completion of the task.
|
|
1427
|
+
:param pulumi.Input[builtins.str] cuda_version: The version of CUDA(Compute Unified Device Architecture) used. CUDA is a parallel computing platform and programming model provided by NVIDIA. A specific version may affect the available GPU functions and performance optimization.
|
|
1428
|
+
:param pulumi.Input[builtins.str] gpu_driver_version: The version of the GPU driver used. Driver version may affect GPU performance and compatibility, so it is important to ensure that the correct version is used
|
|
1429
|
+
:param pulumi.Input[builtins.str] nccl_version: The NVIDIA Collective Communications Library(NCCL) version used. NCCL is a library for multi-GPU and multi-node communication. This parameter is particularly important for optimizing data transmission in distributed computing.
|
|
1430
|
+
:param pulumi.Input[builtins.str] py_torch_version: The version of the PyTorch framework used. PyTorch is a widely used deep learning library, and differences between versions may affect the performance and functional support of model training and inference.
|
|
1431
|
+
"""
|
|
1432
|
+
pulumi.set(__self__, "cpu_per_worker", cpu_per_worker)
|
|
1433
|
+
pulumi.set(__self__, "gpu_per_worker", gpu_per_worker)
|
|
1434
|
+
pulumi.set(__self__, "memory_per_worker", memory_per_worker)
|
|
1435
|
+
pulumi.set(__self__, "share_memory", share_memory)
|
|
1436
|
+
pulumi.set(__self__, "worker_num", worker_num)
|
|
1437
|
+
if cuda_version is not None:
|
|
1438
|
+
pulumi.set(__self__, "cuda_version", cuda_version)
|
|
1439
|
+
if gpu_driver_version is not None:
|
|
1440
|
+
pulumi.set(__self__, "gpu_driver_version", gpu_driver_version)
|
|
1441
|
+
if nccl_version is not None:
|
|
1442
|
+
pulumi.set(__self__, "nccl_version", nccl_version)
|
|
1443
|
+
if py_torch_version is not None:
|
|
1444
|
+
pulumi.set(__self__, "py_torch_version", py_torch_version)
|
|
1445
|
+
|
|
1446
|
+
@property
|
|
1447
|
+
@pulumi.getter(name="cpuPerWorker")
|
|
1448
|
+
def cpu_per_worker(self) -> pulumi.Input[builtins.int]:
|
|
1449
|
+
"""
|
|
1450
|
+
Number of central processing units (CPUs) allocated. This parameter affects the processing power of the computation, especially in tasks that require a large amount of parallel processing.
|
|
1451
|
+
"""
|
|
1452
|
+
return pulumi.get(self, "cpu_per_worker")
|
|
1453
|
+
|
|
1454
|
+
@cpu_per_worker.setter
|
|
1455
|
+
def cpu_per_worker(self, value: pulumi.Input[builtins.int]):
|
|
1456
|
+
pulumi.set(self, "cpu_per_worker", value)
|
|
1457
|
+
|
|
1458
|
+
@property
|
|
1459
|
+
@pulumi.getter(name="gpuPerWorker")
|
|
1460
|
+
def gpu_per_worker(self) -> pulumi.Input[builtins.int]:
|
|
1461
|
+
"""
|
|
1462
|
+
Number of graphics processing units (GPUs). GPUs are a key component in deep learning and large-scale data processing, so this parameter is very important for tasks that require graphics-accelerated computing.
|
|
1463
|
+
"""
|
|
1464
|
+
return pulumi.get(self, "gpu_per_worker")
|
|
1465
|
+
|
|
1466
|
+
@gpu_per_worker.setter
|
|
1467
|
+
def gpu_per_worker(self, value: pulumi.Input[builtins.int]):
|
|
1468
|
+
pulumi.set(self, "gpu_per_worker", value)
|
|
1469
|
+
|
|
1470
|
+
@property
|
|
1471
|
+
@pulumi.getter(name="memoryPerWorker")
|
|
1472
|
+
def memory_per_worker(self) -> pulumi.Input[builtins.int]:
|
|
1473
|
+
"""
|
|
1474
|
+
The amount of memory available. Memory size has an important impact on the performance and stability of the program, especially when dealing with large data sets or high-dimensional data.
|
|
1475
|
+
"""
|
|
1476
|
+
return pulumi.get(self, "memory_per_worker")
|
|
1477
|
+
|
|
1478
|
+
@memory_per_worker.setter
|
|
1479
|
+
def memory_per_worker(self, value: pulumi.Input[builtins.int]):
|
|
1480
|
+
pulumi.set(self, "memory_per_worker", value)
|
|
1481
|
+
|
|
1482
|
+
@property
|
|
1483
|
+
@pulumi.getter(name="shareMemory")
|
|
1484
|
+
def share_memory(self) -> pulumi.Input[builtins.int]:
|
|
1485
|
+
"""
|
|
1486
|
+
Shared memory GB allocation
|
|
1487
|
+
"""
|
|
1488
|
+
return pulumi.get(self, "share_memory")
|
|
1489
|
+
|
|
1490
|
+
@share_memory.setter
|
|
1491
|
+
def share_memory(self, value: pulumi.Input[builtins.int]):
|
|
1492
|
+
pulumi.set(self, "share_memory", value)
|
|
1493
|
+
|
|
1494
|
+
@property
|
|
1495
|
+
@pulumi.getter(name="workerNum")
|
|
1496
|
+
def worker_num(self) -> pulumi.Input[builtins.int]:
|
|
1497
|
+
"""
|
|
1498
|
+
The total number of nodes. This parameter directly affects the parallelism and computing speed of the task, and a higher number of working nodes usually accelerates the completion of the task.
|
|
1499
|
+
"""
|
|
1500
|
+
return pulumi.get(self, "worker_num")
|
|
1501
|
+
|
|
1502
|
+
@worker_num.setter
|
|
1503
|
+
def worker_num(self, value: pulumi.Input[builtins.int]):
|
|
1504
|
+
pulumi.set(self, "worker_num", value)
|
|
1505
|
+
|
|
1506
|
+
@property
|
|
1507
|
+
@pulumi.getter(name="cudaVersion")
|
|
1508
|
+
def cuda_version(self) -> Optional[pulumi.Input[builtins.str]]:
|
|
1509
|
+
"""
|
|
1510
|
+
The version of CUDA(Compute Unified Device Architecture) used. CUDA is a parallel computing platform and programming model provided by NVIDIA. A specific version may affect the available GPU functions and performance optimization.
|
|
1511
|
+
"""
|
|
1512
|
+
return pulumi.get(self, "cuda_version")
|
|
1513
|
+
|
|
1514
|
+
@cuda_version.setter
|
|
1515
|
+
def cuda_version(self, value: Optional[pulumi.Input[builtins.str]]):
|
|
1516
|
+
pulumi.set(self, "cuda_version", value)
|
|
1517
|
+
|
|
1518
|
+
@property
|
|
1519
|
+
@pulumi.getter(name="gpuDriverVersion")
|
|
1520
|
+
def gpu_driver_version(self) -> Optional[pulumi.Input[builtins.str]]:
|
|
1521
|
+
"""
|
|
1522
|
+
The version of the GPU driver used. Driver version may affect GPU performance and compatibility, so it is important to ensure that the correct version is used
|
|
1523
|
+
"""
|
|
1524
|
+
return pulumi.get(self, "gpu_driver_version")
|
|
1525
|
+
|
|
1526
|
+
@gpu_driver_version.setter
|
|
1527
|
+
def gpu_driver_version(self, value: Optional[pulumi.Input[builtins.str]]):
|
|
1528
|
+
pulumi.set(self, "gpu_driver_version", value)
|
|
1529
|
+
|
|
1530
|
+
@property
|
|
1531
|
+
@pulumi.getter(name="ncclVersion")
|
|
1532
|
+
def nccl_version(self) -> Optional[pulumi.Input[builtins.str]]:
|
|
1533
|
+
"""
|
|
1534
|
+
The NVIDIA Collective Communications Library(NCCL) version used. NCCL is a library for multi-GPU and multi-node communication. This parameter is particularly important for optimizing data transmission in distributed computing.
|
|
1535
|
+
"""
|
|
1536
|
+
return pulumi.get(self, "nccl_version")
|
|
1537
|
+
|
|
1538
|
+
@nccl_version.setter
|
|
1539
|
+
def nccl_version(self, value: Optional[pulumi.Input[builtins.str]]):
|
|
1540
|
+
pulumi.set(self, "nccl_version", value)
|
|
1541
|
+
|
|
1542
|
+
@property
|
|
1543
|
+
@pulumi.getter(name="pyTorchVersion")
|
|
1544
|
+
def py_torch_version(self) -> Optional[pulumi.Input[builtins.str]]:
|
|
1545
|
+
"""
|
|
1546
|
+
The version of the PyTorch framework used. PyTorch is a widely used deep learning library, and differences between versions may affect the performance and functional support of model training and inference.
|
|
1547
|
+
"""
|
|
1548
|
+
return pulumi.get(self, "py_torch_version")
|
|
1549
|
+
|
|
1550
|
+
@py_torch_version.setter
|
|
1551
|
+
def py_torch_version(self, value: Optional[pulumi.Input[builtins.str]]):
|
|
1552
|
+
pulumi.set(self, "py_torch_version", value)
|
|
1553
|
+
|
|
1554
|
+
|
|
1230
1555
|
if not MYPY:
|
|
1231
1556
|
class NodeGroupIpAllocationPolicyArgsDict(TypedDict):
|
|
1232
1557
|
bond_policy: NotRequired[pulumi.Input['NodeGroupIpAllocationPolicyBondPolicyArgsDict']]
|
|
@@ -1716,3 +2041,301 @@ class NodeGroupNodeArgs:
|
|
|
1716
2041
|
pulumi.set(self, "vswitch_id", value)
|
|
1717
2042
|
|
|
1718
2043
|
|
|
2044
|
+
if not MYPY:
|
|
2045
|
+
class ResourceMachineTypesArgsDict(TypedDict):
|
|
2046
|
+
cpu_info: pulumi.Input[builtins.str]
|
|
2047
|
+
"""
|
|
2048
|
+
Provides CPU details, including the number of cores, number of threads, clock frequency, and architecture type. This information helps to evaluate the processing power and identify whether it can meet the performance requirements of a particular application.
|
|
2049
|
+
"""
|
|
2050
|
+
gpu_info: pulumi.Input[builtins.str]
|
|
2051
|
+
"""
|
|
2052
|
+
Provides detailed information about the GPU, including the number, model, memory size, and computing capability. This information is particularly important for tasks such as deep learning, scientific computing, and graph processing, helping users understand the graph processing capabilities of nodes.
|
|
2053
|
+
"""
|
|
2054
|
+
bond_num: NotRequired[pulumi.Input[builtins.int]]
|
|
2055
|
+
"""
|
|
2056
|
+
This property specifies the number of network bindings, which relates to the number of physical or virtual network cards connected to the network through the network interface card (NIC). Multiple network bindings can increase bandwidth and redundancy and improve network reliability.
|
|
2057
|
+
"""
|
|
2058
|
+
disk_info: NotRequired[pulumi.Input[builtins.str]]
|
|
2059
|
+
"""
|
|
2060
|
+
Displays information about the storage device, including the disk type (such as SSD or HDD), capacity, and I/O performance. Storage performance is critical in data-intensive applications such as big data processing and databases.
|
|
2061
|
+
"""
|
|
2062
|
+
memory_info: NotRequired[pulumi.Input[builtins.str]]
|
|
2063
|
+
"""
|
|
2064
|
+
This property provides memory details, including total memory, available memory, and usage. This helps users understand the memory processing capabilities of compute nodes, especially when running heavy-duty applications.
|
|
2065
|
+
"""
|
|
2066
|
+
name: NotRequired[pulumi.Input[builtins.str]]
|
|
2067
|
+
"""
|
|
2068
|
+
Specification Name.
|
|
2069
|
+
"""
|
|
2070
|
+
network_info: NotRequired[pulumi.Input[builtins.str]]
|
|
2071
|
+
"""
|
|
2072
|
+
Contains detailed information about the network interface, such as network bandwidth, latency, protocol types supported by the network, IP addresses, and network topology. Optimizing network information is essential to ensure efficient data transmission and low latency.
|
|
2073
|
+
"""
|
|
2074
|
+
network_mode: NotRequired[pulumi.Input[builtins.str]]
|
|
2075
|
+
"""
|
|
2076
|
+
Specifies the network mode, such as bridge mode, NAT mode, or direct connection mode. Different network modes affect the network configuration and data transmission performance of nodes, and affect the network access methods of computing instances.
|
|
2077
|
+
"""
|
|
2078
|
+
node_count: NotRequired[pulumi.Input[builtins.int]]
|
|
2079
|
+
"""
|
|
2080
|
+
Specifies the total number of compute nodes. This property is particularly important in distributed computing and cluster environments, because the number of nodes often directly affects the computing power and the ability to parallel processing.
|
|
2081
|
+
"""
|
|
2082
|
+
type: NotRequired[pulumi.Input[builtins.str]]
|
|
2083
|
+
"""
|
|
2084
|
+
Usually refers to a specific resource type (such as virtual machine, physical server, container, etc.), which is used to distinguish different computing units or resource categories.
|
|
2085
|
+
"""
|
|
2086
|
+
elif False:
|
|
2087
|
+
ResourceMachineTypesArgsDict: TypeAlias = Mapping[str, Any]
|
|
2088
|
+
|
|
2089
|
+
@pulumi.input_type
|
|
2090
|
+
class ResourceMachineTypesArgs:
|
|
2091
|
+
def __init__(__self__, *,
|
|
2092
|
+
cpu_info: pulumi.Input[builtins.str],
|
|
2093
|
+
gpu_info: pulumi.Input[builtins.str],
|
|
2094
|
+
bond_num: Optional[pulumi.Input[builtins.int]] = None,
|
|
2095
|
+
disk_info: Optional[pulumi.Input[builtins.str]] = None,
|
|
2096
|
+
memory_info: Optional[pulumi.Input[builtins.str]] = None,
|
|
2097
|
+
name: Optional[pulumi.Input[builtins.str]] = None,
|
|
2098
|
+
network_info: Optional[pulumi.Input[builtins.str]] = None,
|
|
2099
|
+
network_mode: Optional[pulumi.Input[builtins.str]] = None,
|
|
2100
|
+
node_count: Optional[pulumi.Input[builtins.int]] = None,
|
|
2101
|
+
type: Optional[pulumi.Input[builtins.str]] = None):
|
|
2102
|
+
"""
|
|
2103
|
+
:param pulumi.Input[builtins.str] cpu_info: Provides CPU details, including the number of cores, number of threads, clock frequency, and architecture type. This information helps to evaluate the processing power and identify whether it can meet the performance requirements of a particular application.
|
|
2104
|
+
:param pulumi.Input[builtins.str] gpu_info: Provides detailed information about the GPU, including the number, model, memory size, and computing capability. This information is particularly important for tasks such as deep learning, scientific computing, and graph processing, helping users understand the graph processing capabilities of nodes.
|
|
2105
|
+
:param pulumi.Input[builtins.int] bond_num: This property specifies the number of network bindings, which relates to the number of physical or virtual network cards connected to the network through the network interface card (NIC). Multiple network bindings can increase bandwidth and redundancy and improve network reliability.
|
|
2106
|
+
:param pulumi.Input[builtins.str] disk_info: Displays information about the storage device, including the disk type (such as SSD or HDD), capacity, and I/O performance. Storage performance is critical in data-intensive applications such as big data processing and databases.
|
|
2107
|
+
:param pulumi.Input[builtins.str] memory_info: This property provides memory details, including total memory, available memory, and usage. This helps users understand the memory processing capabilities of compute nodes, especially when running heavy-duty applications.
|
|
2108
|
+
:param pulumi.Input[builtins.str] name: Specification Name.
|
|
2109
|
+
:param pulumi.Input[builtins.str] network_info: Contains detailed information about the network interface, such as network bandwidth, latency, protocol types supported by the network, IP addresses, and network topology. Optimizing network information is essential to ensure efficient data transmission and low latency.
|
|
2110
|
+
:param pulumi.Input[builtins.str] network_mode: Specifies the network mode, such as bridge mode, NAT mode, or direct connection mode. Different network modes affect the network configuration and data transmission performance of nodes, and affect the network access methods of computing instances.
|
|
2111
|
+
:param pulumi.Input[builtins.int] node_count: Specifies the total number of compute nodes. This property is particularly important in distributed computing and cluster environments, because the number of nodes often directly affects the computing power and the ability to parallel processing.
|
|
2112
|
+
:param pulumi.Input[builtins.str] type: Usually refers to a specific resource type (such as virtual machine, physical server, container, etc.), which is used to distinguish different computing units or resource categories.
|
|
2113
|
+
"""
|
|
2114
|
+
pulumi.set(__self__, "cpu_info", cpu_info)
|
|
2115
|
+
pulumi.set(__self__, "gpu_info", gpu_info)
|
|
2116
|
+
if bond_num is not None:
|
|
2117
|
+
pulumi.set(__self__, "bond_num", bond_num)
|
|
2118
|
+
if disk_info is not None:
|
|
2119
|
+
pulumi.set(__self__, "disk_info", disk_info)
|
|
2120
|
+
if memory_info is not None:
|
|
2121
|
+
pulumi.set(__self__, "memory_info", memory_info)
|
|
2122
|
+
if name is not None:
|
|
2123
|
+
pulumi.set(__self__, "name", name)
|
|
2124
|
+
if network_info is not None:
|
|
2125
|
+
pulumi.set(__self__, "network_info", network_info)
|
|
2126
|
+
if network_mode is not None:
|
|
2127
|
+
pulumi.set(__self__, "network_mode", network_mode)
|
|
2128
|
+
if node_count is not None:
|
|
2129
|
+
pulumi.set(__self__, "node_count", node_count)
|
|
2130
|
+
if type is not None:
|
|
2131
|
+
pulumi.set(__self__, "type", type)
|
|
2132
|
+
|
|
2133
|
+
@property
@pulumi.getter(name="cpuInfo")
def cpu_info(self) -> pulumi.Input[builtins.str]:
    """
    CPU details for the node: core and thread counts, clock frequency and
    architecture type. Used to judge whether the processor can meet the
    performance requirements of a given application.
    """
    return pulumi.get(self, "cpu_info")

@cpu_info.setter
def cpu_info(self, value: pulumi.Input[builtins.str]):
    pulumi.set(self, "cpu_info", value)
|
|
2144
|
+
|
|
2145
|
+
@property
@pulumi.getter(name="gpuInfo")
def gpu_info(self) -> pulumi.Input[builtins.str]:
    """
    GPU details for the node: count, model, memory size and compute
    capability. Particularly relevant for deep learning, scientific
    computing and graphics workloads.
    """
    return pulumi.get(self, "gpu_info")

@gpu_info.setter
def gpu_info(self, value: pulumi.Input[builtins.str]):
    pulumi.set(self, "gpu_info", value)
|
|
2156
|
+
|
|
2157
|
+
@property
@pulumi.getter(name="bondNum")
def bond_num(self) -> Optional[pulumi.Input[builtins.int]]:
    """
    Number of network bonds, i.e. how many physical or virtual NICs are
    bonded together. Bonding multiple interfaces increases bandwidth,
    adds redundancy and improves network reliability.
    """
    return pulumi.get(self, "bond_num")

@bond_num.setter
def bond_num(self, value: Optional[pulumi.Input[builtins.int]]):
    pulumi.set(self, "bond_num", value)
|
|
2168
|
+
|
|
2169
|
+
@property
@pulumi.getter(name="diskInfo")
def disk_info(self) -> Optional[pulumi.Input[builtins.str]]:
    """
    Storage-device information: disk type (for example SSD or HDD),
    capacity and I/O performance. Critical for data-intensive workloads
    such as big-data processing and databases.
    """
    return pulumi.get(self, "disk_info")

@disk_info.setter
def disk_info(self, value: Optional[pulumi.Input[builtins.str]]):
    pulumi.set(self, "disk_info", value)
|
|
2180
|
+
|
|
2181
|
+
@property
@pulumi.getter(name="memoryInfo")
def memory_info(self) -> Optional[pulumi.Input[builtins.str]]:
    """
    Memory details: total memory, available memory and usage. Helps gauge
    a compute node's capacity for memory-heavy applications.
    """
    return pulumi.get(self, "memory_info")

@memory_info.setter
def memory_info(self, value: Optional[pulumi.Input[builtins.str]]):
    pulumi.set(self, "memory_info", value)
|
|
2192
|
+
|
|
2193
|
+
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[builtins.str]]:
    """
    Specification Name.
    """
    return pulumi.get(self, "name")

@name.setter
def name(self, value: Optional[pulumi.Input[builtins.str]]):
    pulumi.set(self, "name", value)
|
|
2204
|
+
|
|
2205
|
+
@property
@pulumi.getter(name="networkInfo")
def network_info(self) -> Optional[pulumi.Input[builtins.str]]:
    """
    Network-interface details: bandwidth, latency, supported protocols,
    IP addresses and topology. Relevant for efficient data transmission
    and low latency.
    """
    return pulumi.get(self, "network_info")

@network_info.setter
def network_info(self, value: Optional[pulumi.Input[builtins.str]]):
    pulumi.set(self, "network_info", value)
|
|
2216
|
+
|
|
2217
|
+
@property
@pulumi.getter(name="networkMode")
def network_mode(self) -> Optional[pulumi.Input[builtins.str]]:
    """
    Network mode, such as bridge, NAT or direct connection. The mode
    affects node network configuration, data-transfer performance and how
    compute instances are reached over the network.
    """
    return pulumi.get(self, "network_mode")

@network_mode.setter
def network_mode(self, value: Optional[pulumi.Input[builtins.str]]):
    pulumi.set(self, "network_mode", value)
|
|
2228
|
+
|
|
2229
|
+
@property
@pulumi.getter(name="nodeCount")
def node_count(self) -> Optional[pulumi.Input[builtins.int]]:
    """
    Total number of compute nodes. In distributed and cluster
    environments the node count directly drives aggregate compute power
    and parallel-processing capability.
    """
    return pulumi.get(self, "node_count")

@node_count.setter
def node_count(self, value: Optional[pulumi.Input[builtins.int]]):
    pulumi.set(self, "node_count", value)
|
|
2240
|
+
|
|
2241
|
+
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[builtins.str]]:
    """
    Resource type (for example virtual machine, physical server or
    container), used to distinguish different compute units or resource
    categories.
    """
    return pulumi.get(self, "type")

@type.setter
def type(self, value: Optional[pulumi.Input[builtins.str]]):
    pulumi.set(self, "type", value)
|
|
2252
|
+
|
|
2253
|
+
|
|
2254
|
+
if not MYPY:
    class ResourceUserAccessParamArgsDict(TypedDict):
        access_id: pulumi.Input[builtins.str]
        """
        Access keys are important credentials for authentication.
        """
        access_key: pulumi.Input[builtins.str]
        """
        A Secret Key is a Secret credential paired with an access Key to verify a user's identity and protect the security of an interface.
        """
        endpoint: pulumi.Input[builtins.str]
        """
        An Endpoint is a network address for accessing a service or API, usually a URL to a specific service instance.
        """
        workspace_id: pulumi.Input[builtins.str]
        """
        A Workspace generally refers to a separate space created by a user on a particular computing environment or platform.
        """
elif False:
    # Fallback alias so the symbol exists for type checkers that take the
    # other branch; never evaluated at runtime.
    ResourceUserAccessParamArgsDict: TypeAlias = Mapping[str, Any]
|
|
2274
|
+
|
|
2275
|
+
@pulumi.input_type
class ResourceUserAccessParamArgs:
    def __init__(__self__, *,
                 access_id: pulumi.Input[builtins.str],
                 access_key: pulumi.Input[builtins.str],
                 endpoint: pulumi.Input[builtins.str],
                 workspace_id: pulumi.Input[builtins.str]):
        """
        Credentials and location used to access a user's workspace.

        :param pulumi.Input[builtins.str] access_id: Access keys are important credentials for authentication.
        :param pulumi.Input[builtins.str] access_key: A Secret Key is a Secret credential paired with an access Key to verify a user's identity and protect the security of an interface.
        :param pulumi.Input[builtins.str] endpoint: An Endpoint is a network address for accessing a service or API, usually a URL to a specific service instance.
        :param pulumi.Input[builtins.str] workspace_id: A Workspace generally refers to a separate space created by a user on a particular computing environment or platform.
        """
        # All four properties are required; store each one on the input type.
        for prop, val in (("access_id", access_id),
                          ("access_key", access_key),
                          ("endpoint", endpoint),
                          ("workspace_id", workspace_id)):
            pulumi.set(__self__, prop, val)

    @property
    @pulumi.getter(name="accessId")
    def access_id(self) -> pulumi.Input[builtins.str]:
        """
        Access keys are important credentials for authentication.
        """
        return pulumi.get(self, "access_id")

    @access_id.setter
    def access_id(self, value: pulumi.Input[builtins.str]):
        pulumi.set(self, "access_id", value)

    @property
    @pulumi.getter(name="accessKey")
    def access_key(self) -> pulumi.Input[builtins.str]:
        """
        A Secret Key is a Secret credential paired with an access Key to verify a user's identity and protect the security of an interface.
        """
        return pulumi.get(self, "access_key")

    @access_key.setter
    def access_key(self, value: pulumi.Input[builtins.str]):
        pulumi.set(self, "access_key", value)

    @property
    @pulumi.getter
    def endpoint(self) -> pulumi.Input[builtins.str]:
        """
        An Endpoint is a network address for accessing a service or API, usually a URL to a specific service instance.
        """
        return pulumi.get(self, "endpoint")

    @endpoint.setter
    def endpoint(self, value: pulumi.Input[builtins.str]):
        pulumi.set(self, "endpoint", value)

    @property
    @pulumi.getter(name="workspaceId")
    def workspace_id(self) -> pulumi.Input[builtins.str]:
        """
        A Workspace generally refers to a separate space created by a user on a particular computing environment or platform.
        """
        return pulumi.get(self, "workspace_id")

    @workspace_id.setter
    def workspace_id(self, value: pulumi.Input[builtins.str]):
        pulumi.set(self, "workspace_id", value)
|
|
2340
|
+
|
|
2341
|
+
|