pulumi-alicloud 3.73.0a1736850863__py3-none-any.whl → 3.74.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of pulumi-alicloud might be problematic. Click here for more details.

Files changed (123) hide show
  1. pulumi_alicloud/__init__.py +139 -0
  2. pulumi_alicloud/alb/__init__.py +2 -0
  3. pulumi_alicloud/alb/_inputs.py +913 -250
  4. pulumi_alicloud/alb/a_script.py +116 -98
  5. pulumi_alicloud/alb/get_server_groups.py +55 -25
  6. pulumi_alicloud/alb/listener.py +269 -174
  7. pulumi_alicloud/alb/load_balancer.py +239 -118
  8. pulumi_alicloud/alb/load_balancer_access_log_config_attachment.py +303 -0
  9. pulumi_alicloud/alb/load_balancer_zone_shifted_attachment.py +303 -0
  10. pulumi_alicloud/alb/outputs.py +720 -222
  11. pulumi_alicloud/alb/server_group.py +532 -122
  12. pulumi_alicloud/alikafka/instance.py +188 -70
  13. pulumi_alicloud/amqp/instance.py +77 -28
  14. pulumi_alicloud/bastionhost/_inputs.py +40 -38
  15. pulumi_alicloud/bastionhost/get_instances.py +6 -22
  16. pulumi_alicloud/bastionhost/outputs.py +60 -49
  17. pulumi_alicloud/cen/_inputs.py +34 -0
  18. pulumi_alicloud/cen/instance_grant.py +4 -0
  19. pulumi_alicloud/cen/outputs.py +37 -0
  20. pulumi_alicloud/cen/transit_router_multicast_domain.py +123 -36
  21. pulumi_alicloud/cloudcontrol/__init__.py +12 -0
  22. pulumi_alicloud/cloudcontrol/get_prices.py +199 -0
  23. pulumi_alicloud/cloudcontrol/get_products.py +187 -0
  24. pulumi_alicloud/cloudcontrol/get_resource_types.py +178 -0
  25. pulumi_alicloud/cloudcontrol/outputs.py +688 -0
  26. pulumi_alicloud/cloudcontrol/resource.py +407 -0
  27. pulumi_alicloud/cms/_inputs.py +21 -15
  28. pulumi_alicloud/cms/alarm.py +7 -7
  29. pulumi_alicloud/cms/outputs.py +14 -10
  30. pulumi_alicloud/cs/_inputs.py +349 -1
  31. pulumi_alicloud/cs/get_edge_kubernetes_clusters.py +4 -2
  32. pulumi_alicloud/cs/get_managed_kubernetes_clusters.py +2 -0
  33. pulumi_alicloud/cs/kubernetes.py +29 -29
  34. pulumi_alicloud/cs/managed_kubernetes.py +137 -47
  35. pulumi_alicloud/cs/node_pool.py +77 -2
  36. pulumi_alicloud/cs/outputs.py +265 -1
  37. pulumi_alicloud/dataworks/__init__.py +4 -0
  38. pulumi_alicloud/dataworks/_inputs.py +1534 -0
  39. pulumi_alicloud/dataworks/di_alarm_rule.py +708 -0
  40. pulumi_alicloud/dataworks/di_job.py +1005 -0
  41. pulumi_alicloud/dataworks/dw_resource_group.py +703 -0
  42. pulumi_alicloud/dataworks/network.py +331 -0
  43. pulumi_alicloud/dataworks/outputs.py +1247 -0
  44. pulumi_alicloud/dcdn/waf_rule.py +14 -14
  45. pulumi_alicloud/dfs/file_system.py +62 -23
  46. pulumi_alicloud/dfs/mount_point.py +48 -22
  47. pulumi_alicloud/dns/get_alidns_domains.py +7 -2
  48. pulumi_alicloud/dns/outputs.py +8 -0
  49. pulumi_alicloud/ecs/ecs_launch_template.py +122 -0
  50. pulumi_alicloud/ecs/ecs_network_interface.py +14 -14
  51. pulumi_alicloud/ecs/ecs_session_manager_status.py +18 -18
  52. pulumi_alicloud/ecs/instance.py +7 -7
  53. pulumi_alicloud/ecs/launch_template.py +88 -0
  54. pulumi_alicloud/esa/__init__.py +3 -0
  55. pulumi_alicloud/esa/_inputs.py +81 -0
  56. pulumi_alicloud/esa/http_request_header_modification_rule.py +531 -0
  57. pulumi_alicloud/esa/list.py +361 -0
  58. pulumi_alicloud/esa/outputs.py +48 -0
  59. pulumi_alicloud/esa/page.py +376 -0
  60. pulumi_alicloud/esa/rate_plan_instance.py +63 -7
  61. pulumi_alicloud/ess/get_scaling_groups.py +64 -10
  62. pulumi_alicloud/ess/get_scaling_rules.py +74 -10
  63. pulumi_alicloud/ess/outputs.py +357 -17
  64. pulumi_alicloud/eventbridge/get_service.py +2 -6
  65. pulumi_alicloud/fc/_inputs.py +75 -75
  66. pulumi_alicloud/fc/custom_domain.py +2 -2
  67. pulumi_alicloud/fc/function.py +32 -4
  68. pulumi_alicloud/fc/outputs.py +50 -50
  69. pulumi_alicloud/fc/v2_function.py +20 -0
  70. pulumi_alicloud/fc/v3_function.py +53 -6
  71. pulumi_alicloud/hbr/__init__.py +1 -0
  72. pulumi_alicloud/hbr/cross_account.py +338 -0
  73. pulumi_alicloud/ims/oidc_provider.py +28 -0
  74. pulumi_alicloud/kms/__init__.py +1 -0
  75. pulumi_alicloud/kms/get_instances.py +160 -0
  76. pulumi_alicloud/kms/instance.py +0 -60
  77. pulumi_alicloud/kms/outputs.py +19 -0
  78. pulumi_alicloud/log/resource_record.py +42 -26
  79. pulumi_alicloud/maxcompute/__init__.py +5 -0
  80. pulumi_alicloud/maxcompute/_inputs.py +615 -42
  81. pulumi_alicloud/maxcompute/outputs.py +486 -28
  82. pulumi_alicloud/maxcompute/project.py +63 -31
  83. pulumi_alicloud/maxcompute/quota_plan.py +412 -0
  84. pulumi_alicloud/maxcompute/quota_schedule.py +260 -0
  85. pulumi_alicloud/maxcompute/role.py +423 -0
  86. pulumi_alicloud/maxcompute/role_user_attachment.py +368 -0
  87. pulumi_alicloud/maxcompute/tunnel_quota_timer.py +287 -0
  88. pulumi_alicloud/message/service_queue.py +106 -59
  89. pulumi_alicloud/message/service_topic.py +182 -37
  90. pulumi_alicloud/mongodb/account.py +138 -163
  91. pulumi_alicloud/oos/secret_parameter.py +84 -9
  92. pulumi_alicloud/oss/bucket.py +1 -1
  93. pulumi_alicloud/oss/bucket_policy.py +2 -4
  94. pulumi_alicloud/oss/bucket_referer.py +2 -4
  95. pulumi_alicloud/oss/get_buckets.py +16 -2
  96. pulumi_alicloud/oss/outputs.py +18 -17
  97. pulumi_alicloud/pulumi-plugin.json +1 -1
  98. pulumi_alicloud/rds/instance.py +68 -0
  99. pulumi_alicloud/sae/application.py +84 -7
  100. pulumi_alicloud/sls/collection_policy.py +2 -2
  101. pulumi_alicloud/vpc/__init__.py +6 -0
  102. pulumi_alicloud/vpc/get_ipam_ipam_pool_allocations.py +295 -0
  103. pulumi_alicloud/vpc/get_ipam_ipam_pool_cidrs.py +196 -0
  104. pulumi_alicloud/vpc/get_ipam_ipam_pools.py +353 -0
  105. pulumi_alicloud/vpc/get_ipam_ipam_scopes.py +333 -0
  106. pulumi_alicloud/vpc/get_ipam_ipams.py +291 -0
  107. pulumi_alicloud/vpc/get_networks.py +3 -0
  108. pulumi_alicloud/vpc/ipam_ipam_pool.py +47 -0
  109. pulumi_alicloud/vpc/ipam_ipam_scope.py +47 -0
  110. pulumi_alicloud/vpc/ipam_service.py +144 -0
  111. pulumi_alicloud/vpc/ipv4_cidr_block.py +162 -51
  112. pulumi_alicloud/vpc/nat_gateway.py +145 -7
  113. pulumi_alicloud/vpc/outputs.py +777 -0
  114. pulumi_alicloud/vpc/snat_entry.py +107 -48
  115. pulumi_alicloud/vpn/_inputs.py +96 -72
  116. pulumi_alicloud/vpn/connection.py +24 -28
  117. pulumi_alicloud/vpn/outputs.py +64 -48
  118. pulumi_alicloud/yundun/_inputs.py +26 -24
  119. pulumi_alicloud/yundun/outputs.py +37 -22
  120. {pulumi_alicloud-3.73.0a1736850863.dist-info → pulumi_alicloud-3.74.0.dist-info}/METADATA +1 -1
  121. {pulumi_alicloud-3.73.0a1736850863.dist-info → pulumi_alicloud-3.74.0.dist-info}/RECORD +123 -95
  122. {pulumi_alicloud-3.73.0a1736850863.dist-info → pulumi_alicloud-3.74.0.dist-info}/WHEEL +0 -0
  123. {pulumi_alicloud-3.73.0a1736850863.dist-info → pulumi_alicloud-3.74.0.dist-info}/top_level.txt +0 -0
@@ -62,6 +62,7 @@ class NodePoolArgs:
62
62
  platform: Optional[pulumi.Input[str]] = None,
63
63
  pre_user_data: Optional[pulumi.Input[str]] = None,
64
64
  private_pool_options: Optional[pulumi.Input['NodePoolPrivatePoolOptionsArgs']] = None,
65
+ ram_role_name: Optional[pulumi.Input[str]] = None,
65
66
  rds_instances: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
66
67
  resource_group_id: Optional[pulumi.Input[str]] = None,
67
68
  rolling_policy: Optional[pulumi.Input['NodePoolRollingPolicyArgs']] = None,
@@ -148,6 +149,11 @@ class NodePoolArgs:
148
149
  :param pulumi.Input[str] platform: Operating system release, using `image_type` instead.
149
150
  :param pulumi.Input[str] pre_user_data: Node pre custom data, base64-encoded, the script executed before the node is initialized.
150
151
  :param pulumi.Input['NodePoolPrivatePoolOptionsArgs'] private_pool_options: Private node pool configuration. See `private_pool_options` below.
152
+ :param pulumi.Input[str] ram_role_name: The name of the Worker RAM role.
153
+ * If it is empty, the default Worker RAM role created in the cluster will be used.
154
+ * If the specified RAM role is not empty, the specified RAM role must be a **Common Service role**, and its **trusted service** configuration must be **cloud server**. For more information, see [Create a common service role](https://help.aliyun.com/document_detail/116800.html). If the specified RAM role is not the default Worker RAM role created in the cluster, the role name cannot start with 'KubernetesMasterRole-' or 'KubernetesWorkerRole-'.
155
+
156
+ > **NOTE:** This parameter is only supported for ACK-managed clusters of 1.22 or later versions.
151
157
  :param pulumi.Input[Sequence[pulumi.Input[str]]] rds_instances: The list of RDS instances.
152
158
  :param pulumi.Input[str] resource_group_id: The ID of the resource group
153
159
  :param pulumi.Input['NodePoolRollingPolicyArgs'] rolling_policy: Rotary configuration. See `rolling_policy` below.
@@ -280,6 +286,8 @@ class NodePoolArgs:
280
286
  pulumi.set(__self__, "pre_user_data", pre_user_data)
281
287
  if private_pool_options is not None:
282
288
  pulumi.set(__self__, "private_pool_options", private_pool_options)
289
+ if ram_role_name is not None:
290
+ pulumi.set(__self__, "ram_role_name", ram_role_name)
283
291
  if rds_instances is not None:
284
292
  pulumi.set(__self__, "rds_instances", rds_instances)
285
293
  if resource_group_id is not None:
@@ -854,6 +862,22 @@ class NodePoolArgs:
854
862
  def private_pool_options(self, value: Optional[pulumi.Input['NodePoolPrivatePoolOptionsArgs']]):
855
863
  pulumi.set(self, "private_pool_options", value)
856
864
 
865
+ @property
866
+ @pulumi.getter(name="ramRoleName")
867
+ def ram_role_name(self) -> Optional[pulumi.Input[str]]:
868
+ """
869
+ The name of the Worker RAM role.
870
+ * If it is empty, the default Worker RAM role created in the cluster will be used.
871
+ * If the specified RAM role is not empty, the specified RAM role must be a **Common Service role**, and its **trusted service** configuration must be **cloud server**. For more information, see [Create a common service role](https://help.aliyun.com/document_detail/116800.html). If the specified RAM role is not the default Worker RAM role created in the cluster, the role name cannot start with 'KubernetesMasterRole-' or 'KubernetesWorkerRole-'.
872
+
873
+ > **NOTE:** This parameter is only supported for ACK-managed clusters of 1.22 or later versions.
874
+ """
875
+ return pulumi.get(self, "ram_role_name")
876
+
877
+ @ram_role_name.setter
878
+ def ram_role_name(self, value: Optional[pulumi.Input[str]]):
879
+ pulumi.set(self, "ram_role_name", value)
880
+
857
881
  @property
858
882
  @pulumi.getter(name="rdsInstances")
859
883
  def rds_instances(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
@@ -1281,6 +1305,7 @@ class _NodePoolState:
1281
1305
  platform: Optional[pulumi.Input[str]] = None,
1282
1306
  pre_user_data: Optional[pulumi.Input[str]] = None,
1283
1307
  private_pool_options: Optional[pulumi.Input['NodePoolPrivatePoolOptionsArgs']] = None,
1308
+ ram_role_name: Optional[pulumi.Input[str]] = None,
1284
1309
  rds_instances: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
1285
1310
  resource_group_id: Optional[pulumi.Input[str]] = None,
1286
1311
  rolling_policy: Optional[pulumi.Input['NodePoolRollingPolicyArgs']] = None,
@@ -1369,6 +1394,11 @@ class _NodePoolState:
1369
1394
  :param pulumi.Input[str] platform: Operating system release, using `image_type` instead.
1370
1395
  :param pulumi.Input[str] pre_user_data: Node pre custom data, base64-encoded, the script executed before the node is initialized.
1371
1396
  :param pulumi.Input['NodePoolPrivatePoolOptionsArgs'] private_pool_options: Private node pool configuration. See `private_pool_options` below.
1397
+ :param pulumi.Input[str] ram_role_name: The name of the Worker RAM role.
1398
+ * If it is empty, the default Worker RAM role created in the cluster will be used.
1399
+ * If the specified RAM role is not empty, the specified RAM role must be a **Common Service role**, and its **trusted service** configuration must be **cloud server**. For more information, see [Create a common service role](https://help.aliyun.com/document_detail/116800.html). If the specified RAM role is not the default Worker RAM role created in the cluster, the role name cannot start with 'KubernetesMasterRole-' or 'KubernetesWorkerRole-'.
1400
+
1401
+ > **NOTE:** This parameter is only supported for ACK-managed clusters of 1.22 or later versions.
1372
1402
  :param pulumi.Input[Sequence[pulumi.Input[str]]] rds_instances: The list of RDS instances.
1373
1403
  :param pulumi.Input[str] resource_group_id: The ID of the resource group
1374
1404
  :param pulumi.Input['NodePoolRollingPolicyArgs'] rolling_policy: Rotary configuration. See `rolling_policy` below.
@@ -1506,6 +1536,8 @@ class _NodePoolState:
1506
1536
  pulumi.set(__self__, "pre_user_data", pre_user_data)
1507
1537
  if private_pool_options is not None:
1508
1538
  pulumi.set(__self__, "private_pool_options", private_pool_options)
1539
+ if ram_role_name is not None:
1540
+ pulumi.set(__self__, "ram_role_name", ram_role_name)
1509
1541
  if rds_instances is not None:
1510
1542
  pulumi.set(__self__, "rds_instances", rds_instances)
1511
1543
  if resource_group_id is not None:
@@ -2084,6 +2116,22 @@ class _NodePoolState:
2084
2116
  def private_pool_options(self, value: Optional[pulumi.Input['NodePoolPrivatePoolOptionsArgs']]):
2085
2117
  pulumi.set(self, "private_pool_options", value)
2086
2118
 
2119
+ @property
2120
+ @pulumi.getter(name="ramRoleName")
2121
+ def ram_role_name(self) -> Optional[pulumi.Input[str]]:
2122
+ """
2123
+ The name of the Worker RAM role.
2124
+ * If it is empty, the default Worker RAM role created in the cluster will be used.
2125
+ * If the specified RAM role is not empty, the specified RAM role must be a **Common Service role**, and its **trusted service** configuration must be **cloud server**. For more information, see [Create a common service role](https://help.aliyun.com/document_detail/116800.html). If the specified RAM role is not the default Worker RAM role created in the cluster, the role name cannot start with 'KubernetesMasterRole-' or 'KubernetesWorkerRole-'.
2126
+
2127
+ > **NOTE:** This parameter is only supported for ACK-managed clusters of 1.22 or later versions.
2128
+ """
2129
+ return pulumi.get(self, "ram_role_name")
2130
+
2131
+ @ram_role_name.setter
2132
+ def ram_role_name(self, value: Optional[pulumi.Input[str]]):
2133
+ pulumi.set(self, "ram_role_name", value)
2134
+
2087
2135
  @property
2088
2136
  @pulumi.getter(name="rdsInstances")
2089
2137
  def rds_instances(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
@@ -2536,6 +2584,7 @@ class NodePool(pulumi.CustomResource):
2536
2584
  platform: Optional[pulumi.Input[str]] = None,
2537
2585
  pre_user_data: Optional[pulumi.Input[str]] = None,
2538
2586
  private_pool_options: Optional[pulumi.Input[Union['NodePoolPrivatePoolOptionsArgs', 'NodePoolPrivatePoolOptionsArgsDict']]] = None,
2587
+ ram_role_name: Optional[pulumi.Input[str]] = None,
2539
2588
  rds_instances: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
2540
2589
  resource_group_id: Optional[pulumi.Input[str]] = None,
2541
2590
  rolling_policy: Optional[pulumi.Input[Union['NodePoolRollingPolicyArgs', 'NodePoolRollingPolicyArgsDict']]] = None,
@@ -2576,7 +2625,7 @@ class NodePool(pulumi.CustomResource):
2576
2625
 
2577
2626
  ## Import
2578
2627
 
2579
- ACK Nodepool can be imported using the id, e.g.
2628
+ Container Service for Kubernetes (ACK) Nodepool can be imported using the id, e.g.
2580
2629
 
2581
2630
  ```sh
2582
2631
  $ pulumi import alicloud:cs/nodePool:NodePool example <cluster_id>:<node_pool_id>
@@ -2636,6 +2685,11 @@ class NodePool(pulumi.CustomResource):
2636
2685
  :param pulumi.Input[str] platform: Operating system release, using `image_type` instead.
2637
2686
  :param pulumi.Input[str] pre_user_data: Node pre custom data, base64-encoded, the script executed before the node is initialized.
2638
2687
  :param pulumi.Input[Union['NodePoolPrivatePoolOptionsArgs', 'NodePoolPrivatePoolOptionsArgsDict']] private_pool_options: Private node pool configuration. See `private_pool_options` below.
2688
+ :param pulumi.Input[str] ram_role_name: The name of the Worker RAM role.
2689
+ * If it is empty, the default Worker RAM role created in the cluster will be used.
2690
+ * If the specified RAM role is not empty, the specified RAM role must be a **Common Service role**, and its **trusted service** configuration must be **cloud server**. For more information, see [Create a common service role](https://help.aliyun.com/document_detail/116800.html). If the specified RAM role is not the default Worker RAM role created in the cluster, the role name cannot start with 'KubernetesMasterRole-' or 'KubernetesWorkerRole-'.
2691
+
2692
+ > **NOTE:** This parameter is only supported for ACK-managed clusters of 1.22 or later versions.
2639
2693
  :param pulumi.Input[Sequence[pulumi.Input[str]]] rds_instances: The list of RDS instances.
2640
2694
  :param pulumi.Input[str] resource_group_id: The ID of the resource group
2641
2695
  :param pulumi.Input[Union['NodePoolRollingPolicyArgs', 'NodePoolRollingPolicyArgsDict']] rolling_policy: Rotary configuration. See `rolling_policy` below.
@@ -2691,7 +2745,7 @@ class NodePool(pulumi.CustomResource):
2691
2745
 
2692
2746
  ## Import
2693
2747
 
2694
- ACK Nodepool can be imported using the id, e.g.
2748
+ Container Service for Kubernetes (ACK) Nodepool can be imported using the id, e.g.
2695
2749
 
2696
2750
  ```sh
2697
2751
  $ pulumi import alicloud:cs/nodePool:NodePool example <cluster_id>:<node_pool_id>
@@ -2752,6 +2806,7 @@ class NodePool(pulumi.CustomResource):
2752
2806
  platform: Optional[pulumi.Input[str]] = None,
2753
2807
  pre_user_data: Optional[pulumi.Input[str]] = None,
2754
2808
  private_pool_options: Optional[pulumi.Input[Union['NodePoolPrivatePoolOptionsArgs', 'NodePoolPrivatePoolOptionsArgsDict']]] = None,
2809
+ ram_role_name: Optional[pulumi.Input[str]] = None,
2755
2810
  rds_instances: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
2756
2811
  resource_group_id: Optional[pulumi.Input[str]] = None,
2757
2812
  rolling_policy: Optional[pulumi.Input[Union['NodePoolRollingPolicyArgs', 'NodePoolRollingPolicyArgsDict']]] = None,
@@ -2837,6 +2892,7 @@ class NodePool(pulumi.CustomResource):
2837
2892
  __props__.__dict__["platform"] = platform
2838
2893
  __props__.__dict__["pre_user_data"] = pre_user_data
2839
2894
  __props__.__dict__["private_pool_options"] = private_pool_options
2895
+ __props__.__dict__["ram_role_name"] = ram_role_name
2840
2896
  __props__.__dict__["rds_instances"] = rds_instances
2841
2897
  __props__.__dict__["resource_group_id"] = resource_group_id
2842
2898
  __props__.__dict__["rolling_policy"] = rolling_policy
@@ -2926,6 +2982,7 @@ class NodePool(pulumi.CustomResource):
2926
2982
  platform: Optional[pulumi.Input[str]] = None,
2927
2983
  pre_user_data: Optional[pulumi.Input[str]] = None,
2928
2984
  private_pool_options: Optional[pulumi.Input[Union['NodePoolPrivatePoolOptionsArgs', 'NodePoolPrivatePoolOptionsArgsDict']]] = None,
2985
+ ram_role_name: Optional[pulumi.Input[str]] = None,
2929
2986
  rds_instances: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
2930
2987
  resource_group_id: Optional[pulumi.Input[str]] = None,
2931
2988
  rolling_policy: Optional[pulumi.Input[Union['NodePoolRollingPolicyArgs', 'NodePoolRollingPolicyArgsDict']]] = None,
@@ -3019,6 +3076,11 @@ class NodePool(pulumi.CustomResource):
3019
3076
  :param pulumi.Input[str] platform: Operating system release, using `image_type` instead.
3020
3077
  :param pulumi.Input[str] pre_user_data: Node pre custom data, base64-encoded, the script executed before the node is initialized.
3021
3078
  :param pulumi.Input[Union['NodePoolPrivatePoolOptionsArgs', 'NodePoolPrivatePoolOptionsArgsDict']] private_pool_options: Private node pool configuration. See `private_pool_options` below.
3079
+ :param pulumi.Input[str] ram_role_name: The name of the Worker RAM role.
3080
+ * If it is empty, the default Worker RAM role created in the cluster will be used.
3081
+ * If the specified RAM role is not empty, the specified RAM role must be a **Common Service role**, and its **trusted service** configuration must be **cloud server**. For more information, see [Create a common service role](https://help.aliyun.com/document_detail/116800.html). If the specified RAM role is not the default Worker RAM role created in the cluster, the role name cannot start with 'KubernetesMasterRole-' or 'KubernetesWorkerRole-'.
3082
+
3083
+ > **NOTE:** This parameter is only supported for ACK-managed clusters of 1.22 or later versions.
3022
3084
  :param pulumi.Input[Sequence[pulumi.Input[str]]] rds_instances: The list of RDS instances.
3023
3085
  :param pulumi.Input[str] resource_group_id: The ID of the resource group
3024
3086
  :param pulumi.Input[Union['NodePoolRollingPolicyArgs', 'NodePoolRollingPolicyArgsDict']] rolling_policy: Rotary configuration. See `rolling_policy` below.
@@ -3107,6 +3169,7 @@ class NodePool(pulumi.CustomResource):
3107
3169
  __props__.__dict__["platform"] = platform
3108
3170
  __props__.__dict__["pre_user_data"] = pre_user_data
3109
3171
  __props__.__dict__["private_pool_options"] = private_pool_options
3172
+ __props__.__dict__["ram_role_name"] = ram_role_name
3110
3173
  __props__.__dict__["rds_instances"] = rds_instances
3111
3174
  __props__.__dict__["resource_group_id"] = resource_group_id
3112
3175
  __props__.__dict__["rolling_policy"] = rolling_policy
@@ -3486,6 +3549,18 @@ class NodePool(pulumi.CustomResource):
3486
3549
  """
3487
3550
  return pulumi.get(self, "private_pool_options")
3488
3551
 
3552
+ @property
3553
+ @pulumi.getter(name="ramRoleName")
3554
+ def ram_role_name(self) -> pulumi.Output[str]:
3555
+ """
3556
+ The name of the Worker RAM role.
3557
+ * If it is empty, the default Worker RAM role created in the cluster will be used.
3558
+ * If the specified RAM role is not empty, the specified RAM role must be a **Common Service role**, and its **trusted service** configuration must be **cloud server**. For more information, see [Create a common service role](https://help.aliyun.com/document_detail/116800.html). If the specified RAM role is not the default Worker RAM role created in the cluster, the role name cannot start with 'KubernetesMasterRole-' or 'KubernetesWorkerRole-'.
3559
+
3560
+ > **NOTE:** This parameter is only supported for ACK-managed clusters of 1.22 or later versions.
3561
+ """
3562
+ return pulumi.get(self, "ram_role_name")
3563
+
3489
3564
  @property
3490
3565
  @pulumi.getter(name="rdsInstances")
3491
3566
  def rds_instances(self) -> pulumi.Output[Optional[Sequence[str]]]:
@@ -43,6 +43,8 @@ __all__ = [
43
43
  'ManagedKubernetesRrsaMetadata',
44
44
  'NodePoolDataDisk',
45
45
  'NodePoolKubeletConfiguration',
46
+ 'NodePoolKubeletConfigurationReservedMemory',
47
+ 'NodePoolKubeletConfigurationTracing',
46
48
  'NodePoolLabel',
47
49
  'NodePoolManagement',
48
50
  'NodePoolManagementAutoRepairPolicy',
@@ -2065,10 +2067,20 @@ class NodePoolKubeletConfiguration(dict):
2065
2067
  suggest = None
2066
2068
  if key == "allowedUnsafeSysctls":
2067
2069
  suggest = "allowed_unsafe_sysctls"
2070
+ elif key == "clusterDns":
2071
+ suggest = "cluster_dns"
2068
2072
  elif key == "containerLogMaxFiles":
2069
2073
  suggest = "container_log_max_files"
2070
2074
  elif key == "containerLogMaxSize":
2071
2075
  suggest = "container_log_max_size"
2076
+ elif key == "containerLogMaxWorkers":
2077
+ suggest = "container_log_max_workers"
2078
+ elif key == "containerLogMonitorInterval":
2079
+ suggest = "container_log_monitor_interval"
2080
+ elif key == "cpuCfsQuota":
2081
+ suggest = "cpu_cfs_quota"
2082
+ elif key == "cpuCfsQuotaPeriod":
2083
+ suggest = "cpu_cfs_quota_period"
2072
2084
  elif key == "cpuManagerPolicy":
2073
2085
  suggest = "cpu_manager_policy"
2074
2086
  elif key == "eventBurst":
@@ -2083,6 +2095,10 @@ class NodePoolKubeletConfiguration(dict):
2083
2095
  suggest = "eviction_soft_grace_period"
2084
2096
  elif key == "featureGates":
2085
2097
  suggest = "feature_gates"
2098
+ elif key == "imageGcHighThresholdPercent":
2099
+ suggest = "image_gc_high_threshold_percent"
2100
+ elif key == "imageGcLowThresholdPercent":
2101
+ suggest = "image_gc_low_threshold_percent"
2086
2102
  elif key == "kubeApiBurst":
2087
2103
  suggest = "kube_api_burst"
2088
2104
  elif key == "kubeApiQps":
@@ -2091,16 +2107,24 @@ class NodePoolKubeletConfiguration(dict):
2091
2107
  suggest = "kube_reserved"
2092
2108
  elif key == "maxPods":
2093
2109
  suggest = "max_pods"
2110
+ elif key == "memoryManagerPolicy":
2111
+ suggest = "memory_manager_policy"
2112
+ elif key == "podPidsLimit":
2113
+ suggest = "pod_pids_limit"
2094
2114
  elif key == "readOnlyPort":
2095
2115
  suggest = "read_only_port"
2096
2116
  elif key == "registryBurst":
2097
2117
  suggest = "registry_burst"
2098
2118
  elif key == "registryPullQps":
2099
2119
  suggest = "registry_pull_qps"
2120
+ elif key == "reservedMemories":
2121
+ suggest = "reserved_memories"
2100
2122
  elif key == "serializeImagePulls":
2101
2123
  suggest = "serialize_image_pulls"
2102
2124
  elif key == "systemReserved":
2103
2125
  suggest = "system_reserved"
2126
+ elif key == "topologyManagerPolicy":
2127
+ suggest = "topology_manager_policy"
2104
2128
 
2105
2129
  if suggest:
2106
2130
  pulumi.log.warn(f"Key '{key}' not found in NodePoolKubeletConfiguration. Access the value via the '{suggest}' property getter instead.")
@@ -2115,8 +2139,13 @@ class NodePoolKubeletConfiguration(dict):
2115
2139
 
2116
2140
  def __init__(__self__, *,
2117
2141
  allowed_unsafe_sysctls: Optional[Sequence[str]] = None,
2142
+ cluster_dns: Optional[Sequence[str]] = None,
2118
2143
  container_log_max_files: Optional[str] = None,
2119
2144
  container_log_max_size: Optional[str] = None,
2145
+ container_log_max_workers: Optional[str] = None,
2146
+ container_log_monitor_interval: Optional[str] = None,
2147
+ cpu_cfs_quota: Optional[str] = None,
2148
+ cpu_cfs_quota_period: Optional[str] = None,
2120
2149
  cpu_manager_policy: Optional[str] = None,
2121
2150
  event_burst: Optional[str] = None,
2122
2151
  event_record_qps: Optional[str] = None,
@@ -2124,19 +2153,31 @@ class NodePoolKubeletConfiguration(dict):
2124
2153
  eviction_soft: Optional[Mapping[str, str]] = None,
2125
2154
  eviction_soft_grace_period: Optional[Mapping[str, str]] = None,
2126
2155
  feature_gates: Optional[Mapping[str, bool]] = None,
2156
+ image_gc_high_threshold_percent: Optional[str] = None,
2157
+ image_gc_low_threshold_percent: Optional[str] = None,
2127
2158
  kube_api_burst: Optional[str] = None,
2128
2159
  kube_api_qps: Optional[str] = None,
2129
2160
  kube_reserved: Optional[Mapping[str, str]] = None,
2130
2161
  max_pods: Optional[str] = None,
2162
+ memory_manager_policy: Optional[str] = None,
2163
+ pod_pids_limit: Optional[str] = None,
2131
2164
  read_only_port: Optional[str] = None,
2132
2165
  registry_burst: Optional[str] = None,
2133
2166
  registry_pull_qps: Optional[str] = None,
2167
+ reserved_memories: Optional[Sequence['outputs.NodePoolKubeletConfigurationReservedMemory']] = None,
2134
2168
  serialize_image_pulls: Optional[str] = None,
2135
- system_reserved: Optional[Mapping[str, str]] = None):
2169
+ system_reserved: Optional[Mapping[str, str]] = None,
2170
+ topology_manager_policy: Optional[str] = None,
2171
+ tracing: Optional['outputs.NodePoolKubeletConfigurationTracing'] = None):
2136
2172
  """
2137
2173
  :param Sequence[str] allowed_unsafe_sysctls: Allowed sysctl mode whitelist.
2174
+ :param Sequence[str] cluster_dns: The list of IP addresses of the cluster DNS servers.
2138
2175
  :param str container_log_max_files: The maximum number of log files that can exist in each container.
2139
2176
  :param str container_log_max_size: The maximum size that can be reached before a log file is rotated.
2177
+ :param str container_log_max_workers: Specifies the maximum number of concurrent workers required to perform log rotation operations.
2178
+ :param str container_log_monitor_interval: Specifies the duration for which container logs are monitored for log rotation.
2179
+ :param str cpu_cfs_quota: CPU CFS quota constraint switch.
2180
+ :param str cpu_cfs_quota_period: CPU CFS quota period value.
2140
2181
  :param str cpu_manager_policy: Same as cpuManagerPolicy. The name of the policy to use. Requires the CPUManager feature gate to be enabled. Valid value is `none` or `static`.
2141
2182
  :param str event_burst: Same as eventBurst. The maximum size of a burst of event creations, temporarily allows event creations to burst to this number, while still not exceeding `event_record_qps`. It is only used when `event_record_qps` is greater than 0. Valid value is `[0-100]`.
2142
2183
  :param str event_record_qps: Same as eventRecordQPS. The maximum event creations per second. If 0, there is no limit enforced. Valid value is `[0-50]`.
@@ -2144,22 +2185,39 @@ class NodePoolKubeletConfiguration(dict):
2144
2185
  :param Mapping[str, str] eviction_soft: Same as evictionSoft. The map of signal names to quantities that defines soft eviction thresholds. For example: `{"memory.available" = "300Mi"}`.
2145
2186
  :param Mapping[str, str] eviction_soft_grace_period: Same as evictionSoftGracePeriod. The map of signal names to quantities that defines grace periods for each soft eviction signal. For example: `{"memory.available" = "30s"}`.
2146
2187
  :param Mapping[str, bool] feature_gates: Feature switch to enable configuration of experimental features.
2188
+ :param str image_gc_high_threshold_percent: If the image usage exceeds this threshold, image garbage collection will continue.
2189
+ :param str image_gc_low_threshold_percent: Image garbage collection is not performed when the image usage is below this threshold.
2147
2190
  :param str kube_api_burst: Same as kubeAPIBurst. The burst to allow while talking with kubernetes api-server. Valid value is `[0-100]`.
2148
2191
  :param str kube_api_qps: Same as kubeAPIQPS. The QPS to use while talking with kubernetes api-server. Valid value is `[0-50]`.
2149
2192
  :param Mapping[str, str] kube_reserved: Same as kubeReserved. The set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=150G) pairs that describe resources reserved for kubernetes system components. Currently, cpu, memory and local storage for root file system are supported. See [compute resources](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) for more details.
2150
2193
  :param str max_pods: The maximum number of running pods.
2194
+ :param str memory_manager_policy: The policy to be used by the memory manager.
2195
+ :param str pod_pids_limit: The maximum number of PIDs that can be used in a Pod.
2151
2196
  :param str read_only_port: Read-only port number.
2152
2197
  :param str registry_burst: Same as registryBurst. The maximum size of burst pulls, temporarily allows pulls to burst to this number, while still not exceeding `registry_pull_qps`. Only used if `registry_pull_qps` is greater than 0. Valid value is `[0-100]`.
2153
2198
  :param str registry_pull_qps: Same as registryPullQPS. The limit of registry pulls per second. Setting it to `0` means no limit. Valid value is `[0-50]`.
2199
+ :param Sequence['NodePoolKubeletConfigurationReservedMemoryArgs'] reserved_memories: Reserve memory for NUMA nodes. See `reserved_memory` below.
2154
2200
  :param str serialize_image_pulls: Same as serializeImagePulls. When enabled, it tells the Kubelet to pull images one at a time. We recommend not changing the default value on nodes that run docker daemon with version < 1.9 or an Aufs storage backend. Valid value is `true` or `false`.
2155
2201
  :param Mapping[str, str] system_reserved: Same as systemReserved. The set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=150G) pairs that describe resources reserved for non-kubernetes components. Currently, only cpu and memory are supported. See [compute resources](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) for more details.
2202
+ :param str topology_manager_policy: Name of the Topology Manager policy used.
2203
+ :param 'NodePoolKubeletConfigurationTracingArgs' tracing: OpenTelemetry tracks the configuration information for client settings versioning. See `tracing` below.
2156
2204
  """
2157
2205
  if allowed_unsafe_sysctls is not None:
2158
2206
  pulumi.set(__self__, "allowed_unsafe_sysctls", allowed_unsafe_sysctls)
2207
+ if cluster_dns is not None:
2208
+ pulumi.set(__self__, "cluster_dns", cluster_dns)
2159
2209
  if container_log_max_files is not None:
2160
2210
  pulumi.set(__self__, "container_log_max_files", container_log_max_files)
2161
2211
  if container_log_max_size is not None:
2162
2212
  pulumi.set(__self__, "container_log_max_size", container_log_max_size)
2213
+ if container_log_max_workers is not None:
2214
+ pulumi.set(__self__, "container_log_max_workers", container_log_max_workers)
2215
+ if container_log_monitor_interval is not None:
2216
+ pulumi.set(__self__, "container_log_monitor_interval", container_log_monitor_interval)
2217
+ if cpu_cfs_quota is not None:
2218
+ pulumi.set(__self__, "cpu_cfs_quota", cpu_cfs_quota)
2219
+ if cpu_cfs_quota_period is not None:
2220
+ pulumi.set(__self__, "cpu_cfs_quota_period", cpu_cfs_quota_period)
2163
2221
  if cpu_manager_policy is not None:
2164
2222
  pulumi.set(__self__, "cpu_manager_policy", cpu_manager_policy)
2165
2223
  if event_burst is not None:
@@ -2174,6 +2232,10 @@ class NodePoolKubeletConfiguration(dict):
2174
2232
  pulumi.set(__self__, "eviction_soft_grace_period", eviction_soft_grace_period)
2175
2233
  if feature_gates is not None:
2176
2234
  pulumi.set(__self__, "feature_gates", feature_gates)
2235
+ if image_gc_high_threshold_percent is not None:
2236
+ pulumi.set(__self__, "image_gc_high_threshold_percent", image_gc_high_threshold_percent)
2237
+ if image_gc_low_threshold_percent is not None:
2238
+ pulumi.set(__self__, "image_gc_low_threshold_percent", image_gc_low_threshold_percent)
2177
2239
  if kube_api_burst is not None:
2178
2240
  pulumi.set(__self__, "kube_api_burst", kube_api_burst)
2179
2241
  if kube_api_qps is not None:
@@ -2182,16 +2244,26 @@ class NodePoolKubeletConfiguration(dict):
2182
2244
  pulumi.set(__self__, "kube_reserved", kube_reserved)
2183
2245
  if max_pods is not None:
2184
2246
  pulumi.set(__self__, "max_pods", max_pods)
2247
+ if memory_manager_policy is not None:
2248
+ pulumi.set(__self__, "memory_manager_policy", memory_manager_policy)
2249
+ if pod_pids_limit is not None:
2250
+ pulumi.set(__self__, "pod_pids_limit", pod_pids_limit)
2185
2251
  if read_only_port is not None:
2186
2252
  pulumi.set(__self__, "read_only_port", read_only_port)
2187
2253
  if registry_burst is not None:
2188
2254
  pulumi.set(__self__, "registry_burst", registry_burst)
2189
2255
  if registry_pull_qps is not None:
2190
2256
  pulumi.set(__self__, "registry_pull_qps", registry_pull_qps)
2257
+ if reserved_memories is not None:
2258
+ pulumi.set(__self__, "reserved_memories", reserved_memories)
2191
2259
  if serialize_image_pulls is not None:
2192
2260
  pulumi.set(__self__, "serialize_image_pulls", serialize_image_pulls)
2193
2261
  if system_reserved is not None:
2194
2262
  pulumi.set(__self__, "system_reserved", system_reserved)
2263
+ if topology_manager_policy is not None:
2264
+ pulumi.set(__self__, "topology_manager_policy", topology_manager_policy)
2265
+ if tracing is not None:
2266
+ pulumi.set(__self__, "tracing", tracing)
2195
2267
 
2196
2268
  @property
2197
2269
  @pulumi.getter(name="allowedUnsafeSysctls")
@@ -2201,6 +2273,14 @@ class NodePoolKubeletConfiguration(dict):
2201
2273
  """
2202
2274
  return pulumi.get(self, "allowed_unsafe_sysctls")
2203
2275
 
2276
+ @property
2277
+ @pulumi.getter(name="clusterDns")
2278
+ def cluster_dns(self) -> Optional[Sequence[str]]:
2279
+ """
2280
+ The list of IP addresses of the cluster DNS servers.
2281
+ """
2282
+ return pulumi.get(self, "cluster_dns")
2283
+
2204
2284
  @property
2205
2285
  @pulumi.getter(name="containerLogMaxFiles")
2206
2286
  def container_log_max_files(self) -> Optional[str]:
@@ -2217,6 +2297,38 @@ class NodePoolKubeletConfiguration(dict):
2217
2297
  """
2218
2298
  return pulumi.get(self, "container_log_max_size")
2219
2299
 
2300
+ @property
2301
+ @pulumi.getter(name="containerLogMaxWorkers")
2302
+ def container_log_max_workers(self) -> Optional[str]:
2303
+ """
2304
+ Specifies the maximum number of concurrent workers required to perform log rotation operations.
2305
+ """
2306
+ return pulumi.get(self, "container_log_max_workers")
2307
+
2308
+ @property
2309
+ @pulumi.getter(name="containerLogMonitorInterval")
2310
+ def container_log_monitor_interval(self) -> Optional[str]:
2311
+ """
2312
+ Specifies the duration for which container logs are monitored for log rotation.
2313
+ """
2314
+ return pulumi.get(self, "container_log_monitor_interval")
2315
+
2316
+ @property
2317
+ @pulumi.getter(name="cpuCfsQuota")
2318
+ def cpu_cfs_quota(self) -> Optional[str]:
2319
+ """
2320
+ CPU CFS quota constraint switch.
2321
+ """
2322
+ return pulumi.get(self, "cpu_cfs_quota")
2323
+
2324
+ @property
2325
+ @pulumi.getter(name="cpuCfsQuotaPeriod")
2326
+ def cpu_cfs_quota_period(self) -> Optional[str]:
2327
+ """
2328
+ CPU CFS quota period value.
2329
+ """
2330
+ return pulumi.get(self, "cpu_cfs_quota_period")
2331
+
2220
2332
  @property
2221
2333
  @pulumi.getter(name="cpuManagerPolicy")
2222
2334
  def cpu_manager_policy(self) -> Optional[str]:
@@ -2273,6 +2385,22 @@ class NodePoolKubeletConfiguration(dict):
2273
2385
  """
2274
2386
  return pulumi.get(self, "feature_gates")
2275
2387
 
2388
+ @property
2389
+ @pulumi.getter(name="imageGcHighThresholdPercent")
2390
+ def image_gc_high_threshold_percent(self) -> Optional[str]:
2391
+ """
2392
+ If the image usage exceeds this threshold, image garbage collection will continue.
2393
+ """
2394
+ return pulumi.get(self, "image_gc_high_threshold_percent")
2395
+
2396
+ @property
2397
+ @pulumi.getter(name="imageGcLowThresholdPercent")
2398
+ def image_gc_low_threshold_percent(self) -> Optional[str]:
2399
+ """
2400
+ Image garbage collection is not performed when the image usage is below this threshold.
2401
+ """
2402
+ return pulumi.get(self, "image_gc_low_threshold_percent")
2403
+
2276
2404
  @property
2277
2405
  @pulumi.getter(name="kubeApiBurst")
2278
2406
  def kube_api_burst(self) -> Optional[str]:
@@ -2305,6 +2433,22 @@ class NodePoolKubeletConfiguration(dict):
2305
2433
  """
2306
2434
  return pulumi.get(self, "max_pods")
2307
2435
 
2436
+ @property
2437
+ @pulumi.getter(name="memoryManagerPolicy")
2438
+ def memory_manager_policy(self) -> Optional[str]:
2439
+ """
2440
+ The policy to be used by the memory manager.
2441
+ """
2442
+ return pulumi.get(self, "memory_manager_policy")
2443
+
2444
+ @property
2445
+ @pulumi.getter(name="podPidsLimit")
2446
+ def pod_pids_limit(self) -> Optional[str]:
2447
+ """
2448
+ The maximum number of PIDs that can be used in a Pod.
2449
+ """
2450
+ return pulumi.get(self, "pod_pids_limit")
2451
+
2308
2452
  @property
2309
2453
  @pulumi.getter(name="readOnlyPort")
2310
2454
  def read_only_port(self) -> Optional[str]:
@@ -2329,6 +2473,14 @@ class NodePoolKubeletConfiguration(dict):
2329
2473
  """
2330
2474
  return pulumi.get(self, "registry_pull_qps")
2331
2475
 
2476
+ @property
2477
+ @pulumi.getter(name="reservedMemories")
2478
+ def reserved_memories(self) -> Optional[Sequence['outputs.NodePoolKubeletConfigurationReservedMemory']]:
2479
+ """
2480
+ Reserve memory for NUMA nodes. See `reserved_memory` below.
2481
+ """
2482
+ return pulumi.get(self, "reserved_memories")
2483
+
2332
2484
  @property
2333
2485
  @pulumi.getter(name="serializeImagePulls")
2334
2486
  def serialize_image_pulls(self) -> Optional[str]:
@@ -2345,6 +2497,118 @@ class NodePoolKubeletConfiguration(dict):
2345
2497
  """
2346
2498
  return pulumi.get(self, "system_reserved")
2347
2499
 
2500
+ @property
2501
+ @pulumi.getter(name="topologyManagerPolicy")
2502
+ def topology_manager_policy(self) -> Optional[str]:
2503
+ """
2504
+ Name of the Topology Manager policy used.
2505
+ """
2506
+ return pulumi.get(self, "topology_manager_policy")
2507
+
2508
+ @property
2509
+ @pulumi.getter
2510
+ def tracing(self) -> Optional['outputs.NodePoolKubeletConfigurationTracing']:
2511
+ """
2512
+ OpenTelemetry tracks the configuration information for client settings versioning. See `tracing` below.
2513
+ """
2514
+ return pulumi.get(self, "tracing")
2515
+
2516
+
2517
+ @pulumi.output_type
2518
+ class NodePoolKubeletConfigurationReservedMemory(dict):
2519
+ @staticmethod
2520
+ def __key_warning(key: str):
2521
+ suggest = None
2522
+ if key == "numaNode":
2523
+ suggest = "numa_node"
2524
+
2525
+ if suggest:
2526
+ pulumi.log.warn(f"Key '{key}' not found in NodePoolKubeletConfigurationReservedMemory. Access the value via the '{suggest}' property getter instead.")
2527
+
2528
+ def __getitem__(self, key: str) -> Any:
2529
+ NodePoolKubeletConfigurationReservedMemory.__key_warning(key)
2530
+ return super().__getitem__(key)
2531
+
2532
+ def get(self, key: str, default = None) -> Any:
2533
+ NodePoolKubeletConfigurationReservedMemory.__key_warning(key)
2534
+ return super().get(key, default)
2535
+
2536
+ def __init__(__self__, *,
2537
+ limits: Optional[Mapping[str, str]] = None,
2538
+ numa_node: Optional[int] = None):
2539
+ """
2540
+ :param Mapping[str, str] limits: Memory resource limit.
2541
+ :param int numa_node: The NUMA node.
2542
+ """
2543
+ if limits is not None:
2544
+ pulumi.set(__self__, "limits", limits)
2545
+ if numa_node is not None:
2546
+ pulumi.set(__self__, "numa_node", numa_node)
2547
+
2548
+ @property
2549
+ @pulumi.getter
2550
+ def limits(self) -> Optional[Mapping[str, str]]:
2551
+ """
2552
+ Memory resource limit.
2553
+ """
2554
+ return pulumi.get(self, "limits")
2555
+
2556
+ @property
2557
+ @pulumi.getter(name="numaNode")
2558
+ def numa_node(self) -> Optional[int]:
2559
+ """
2560
+ The NUMA node.
2561
+ """
2562
+ return pulumi.get(self, "numa_node")
2563
+
2564
+
2565
+ @pulumi.output_type
2566
+ class NodePoolKubeletConfigurationTracing(dict):
2567
+ @staticmethod
2568
+ def __key_warning(key: str):
2569
+ suggest = None
2570
+ if key == "samplingRatePerMillion":
2571
+ suggest = "sampling_rate_per_million"
2572
+
2573
+ if suggest:
2574
+ pulumi.log.warn(f"Key '{key}' not found in NodePoolKubeletConfigurationTracing. Access the value via the '{suggest}' property getter instead.")
2575
+
2576
+ def __getitem__(self, key: str) -> Any:
2577
+ NodePoolKubeletConfigurationTracing.__key_warning(key)
2578
+ return super().__getitem__(key)
2579
+
2580
+ def get(self, key: str, default = None) -> Any:
2581
+ NodePoolKubeletConfigurationTracing.__key_warning(key)
2582
+ return super().get(key, default)
2583
+
2584
+ def __init__(__self__, *,
2585
+ endpoint: Optional[str] = None,
2586
+ sampling_rate_per_million: Optional[str] = None):
2587
+ """
2588
+ :param str endpoint: The endpoint of the collector.
2589
+ :param str sampling_rate_per_million: Number of samples to be collected per million span.
2590
+ """
2591
+ if endpoint is not None:
2592
+ pulumi.set(__self__, "endpoint", endpoint)
2593
+ if sampling_rate_per_million is not None:
2594
+ pulumi.set(__self__, "sampling_rate_per_million", sampling_rate_per_million)
2595
+
2596
+ @property
2597
+ @pulumi.getter
2598
+ def endpoint(self) -> Optional[str]:
2599
+ """
2600
+ The endpoint of the collector.
2601
+ """
2602
+ return pulumi.get(self, "endpoint")
2603
+
2604
+ @property
2605
+ @pulumi.getter(name="samplingRatePerMillion")
2606
+ def sampling_rate_per_million(self) -> Optional[str]:
2607
+ """
2608
+ Number of samples to be collected per million span.
2609
+ """
2610
+ return pulumi.get(self, "sampling_rate_per_million")
2611
+
2348
2612
 
2349
2613
  @pulumi.output_type
2350
2614
  class NodePoolLabel(dict):
@@ -7,9 +7,13 @@ import typing
7
7
  # Export this package's modules as members:
8
8
  from .data_source import *
9
9
  from .data_source_shared_rule import *
10
+ from .di_alarm_rule import *
11
+ from .di_job import *
12
+ from .dw_resource_group import *
10
13
  from .folder import *
11
14
  from .get_folders import *
12
15
  from .get_service import *
16
+ from .network import *
13
17
  from .project import *
14
18
  from .project_member import *
15
19
  from ._inputs import *