pulumi-aws-native 1.38.0a1761803003__py3-none-any.whl → 1.38.0a1762272920__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pulumi_aws_native/__init__.py +17 -1
- pulumi_aws_native/appstream/get_image_builder.py +0 -4
- pulumi_aws_native/appstream/image_builder.py +0 -16
- pulumi_aws_native/aps/_inputs.py +58 -0
- pulumi_aws_native/aps/outputs.py +36 -0
- pulumi_aws_native/arcregionswitch/get_plan.py +0 -3
- pulumi_aws_native/arcregionswitch/plan.py +0 -3
- pulumi_aws_native/batch/_inputs.py +9 -0
- pulumi_aws_native/batch/job_definition.py +8 -0
- pulumi_aws_native/batch/outputs.py +6 -0
- pulumi_aws_native/bedrock/automated_reasoning_policy.py +51 -0
- pulumi_aws_native/bedrock/get_automated_reasoning_policy.py +12 -1
- pulumi_aws_native/bedrockagentcore/__init__.py +2 -0
- pulumi_aws_native/bedrockagentcore/get_workload_identity.py +134 -0
- pulumi_aws_native/bedrockagentcore/workload_identity.py +217 -0
- pulumi_aws_native/ce/_enums.py +3 -0
- pulumi_aws_native/cleanrooms/configured_table.py +4 -4
- pulumi_aws_native/cleanrooms/get_configured_table.py +1 -1
- pulumi_aws_native/cognito/__init__.py +2 -0
- pulumi_aws_native/cognito/_enums.py +12 -0
- pulumi_aws_native/cognito/get_terms.py +117 -0
- pulumi_aws_native/cognito/terms.py +236 -0
- pulumi_aws_native/connectcampaignsv2/_inputs.py +28 -0
- pulumi_aws_native/connectcampaignsv2/outputs.py +16 -0
- pulumi_aws_native/datazone/connection.py +30 -1
- pulumi_aws_native/ec2/get_volume.py +37 -15
- pulumi_aws_native/ec2/volume.py +115 -74
- pulumi_aws_native/ecs/_enums.py +9 -0
- pulumi_aws_native/ecs/_inputs.py +96 -15
- pulumi_aws_native/ecs/outputs.py +60 -10
- pulumi_aws_native/eks/_enums.py +11 -0
- pulumi_aws_native/eks/_inputs.py +199 -1
- pulumi_aws_native/eks/get_nodegroup.py +1 -0
- pulumi_aws_native/eks/nodegroup.py +1 -0
- pulumi_aws_native/eks/outputs.py +169 -1
- pulumi_aws_native/glue/__init__.py +2 -0
- pulumi_aws_native/glue/_inputs.py +134 -0
- pulumi_aws_native/glue/get_integration_resource_property.py +127 -0
- pulumi_aws_native/glue/integration_resource_property.py +229 -0
- pulumi_aws_native/glue/outputs.py +122 -0
- pulumi_aws_native/kendra/_inputs.py +21 -21
- pulumi_aws_native/kendra/outputs.py +14 -14
- pulumi_aws_native/networkfirewall/_inputs.py +7 -0
- pulumi_aws_native/networkfirewall/firewall.py +3 -0
- pulumi_aws_native/networkfirewall/get_firewall.py +3 -0
- pulumi_aws_native/networkfirewall/outputs.py +4 -0
- pulumi_aws_native/pulumi-plugin.json +1 -1
- pulumi_aws_native/qbusiness/_inputs.py +3 -3
- pulumi_aws_native/qbusiness/application.py +4 -4
- pulumi_aws_native/qbusiness/outputs.py +2 -2
- pulumi_aws_native/quicksight/_inputs.py +9 -9
- pulumi_aws_native/quicksight/outputs.py +6 -6
- pulumi_aws_native/rtbfabric/_enums.py +3 -0
- pulumi_aws_native/rtbfabric/_inputs.py +70 -0
- pulumi_aws_native/rtbfabric/get_link.py +18 -0
- pulumi_aws_native/rtbfabric/get_requester_gateway.py +15 -0
- pulumi_aws_native/rtbfabric/get_responder_gateway.py +30 -0
- pulumi_aws_native/rtbfabric/link.py +51 -0
- pulumi_aws_native/rtbfabric/outputs.py +46 -0
- pulumi_aws_native/rtbfabric/requester_gateway.py +40 -0
- pulumi_aws_native/rtbfabric/responder_gateway.py +80 -0
- pulumi_aws_native/s3/_enums.py +1 -1
- pulumi_aws_native/s3/_inputs.py +11 -5
- pulumi_aws_native/s3/outputs.py +10 -4
- pulumi_aws_native/s3vectors/__init__.py +17 -0
- pulumi_aws_native/s3vectors/_enums.py +39 -0
- pulumi_aws_native/s3vectors/_inputs.py +138 -0
- pulumi_aws_native/s3vectors/get_index.py +99 -0
- pulumi_aws_native/s3vectors/get_vector_bucket.py +99 -0
- pulumi_aws_native/s3vectors/get_vector_bucket_policy.py +78 -0
- pulumi_aws_native/s3vectors/index.py +367 -0
- pulumi_aws_native/s3vectors/outputs.py +129 -0
- pulumi_aws_native/s3vectors/vector_bucket.py +199 -0
- pulumi_aws_native/s3vectors/vector_bucket_policy.py +188 -0
- pulumi_aws_native/sso/_enums.py +1 -1
- pulumi_aws_native/sso/assignment.py +8 -8
- pulumi_aws_native/transfer/_inputs.py +9 -0
- pulumi_aws_native/transfer/connector.py +3 -0
- pulumi_aws_native/transfer/get_connector.py +3 -0
- pulumi_aws_native/transfer/outputs.py +6 -0
- pulumi_aws_native/wafv2/_inputs.py +72 -0
- pulumi_aws_native/wafv2/get_web_acl.py +15 -1
- pulumi_aws_native/wafv2/outputs.py +45 -0
- pulumi_aws_native/wafv2/web_acl.py +29 -0
- {pulumi_aws_native-1.38.0a1761803003.dist-info → pulumi_aws_native-1.38.0a1762272920.dist-info}/METADATA +1 -1
- {pulumi_aws_native-1.38.0a1761803003.dist-info → pulumi_aws_native-1.38.0a1762272920.dist-info}/RECORD +88 -72
- {pulumi_aws_native-1.38.0a1761803003.dist-info → pulumi_aws_native-1.38.0a1762272920.dist-info}/WHEEL +0 -0
- {pulumi_aws_native-1.38.0a1761803003.dist-info → pulumi_aws_native-1.38.0a1762272920.dist-info}/top_level.txt +0 -0
pulumi_aws_native/eks/_inputs.py
CHANGED
@@ -72,6 +72,8 @@ __all__ = [
     'NamespaceConfigPropertiesArgsDict',
     'NodegroupLaunchTemplateSpecificationArgs',
     'NodegroupLaunchTemplateSpecificationArgsDict',
+    'NodegroupNodeRepairConfigOverridesArgs',
+    'NodegroupNodeRepairConfigOverridesArgsDict',
     'NodegroupNodeRepairConfigArgs',
     'NodegroupNodeRepairConfigArgsDict',
     'NodegroupRemoteAccessArgs',
@@ -1645,6 +1647,102 @@ class NodegroupLaunchTemplateSpecificationArgs:
         pulumi.set(self, "version", value)


+if not MYPY:
+    class NodegroupNodeRepairConfigOverridesArgsDict(TypedDict):
+        """
+        Specify granular overrides for specific repair actions. These overrides control the repair action and the repair delay time before a node is considered eligible for repair. If you use this, you must specify all the values.
+        """
+        min_repair_wait_time_mins: NotRequired[pulumi.Input[_builtins.int]]
+        """
+        Specify the minimum time in minutes to wait before attempting to repair a node with this specific NodeMonitoringCondition and NodeUnhealthyReason.
+        """
+        node_monitoring_condition: NotRequired[pulumi.Input[_builtins.str]]
+        """
+        Specify an unhealthy condition reported by the node monitoring agent that this override would apply to.
+        """
+        node_unhealthy_reason: NotRequired[pulumi.Input[_builtins.str]]
+        """
+        Specify a reason reported by the node monitoring agent that this override would apply to.
+        """
+        repair_action: NotRequired[pulumi.Input['NodegroupNodeRepairConfigOverridesRepairAction']]
+        """
+        Specify the repair action to take for nodes when all of the specified conditions are met.
+        """
+elif False:
+    NodegroupNodeRepairConfigOverridesArgsDict: TypeAlias = Mapping[str, Any]
+
+@pulumi.input_type
+class NodegroupNodeRepairConfigOverridesArgs:
+    def __init__(__self__, *,
+                 min_repair_wait_time_mins: Optional[pulumi.Input[_builtins.int]] = None,
+                 node_monitoring_condition: Optional[pulumi.Input[_builtins.str]] = None,
+                 node_unhealthy_reason: Optional[pulumi.Input[_builtins.str]] = None,
+                 repair_action: Optional[pulumi.Input['NodegroupNodeRepairConfigOverridesRepairAction']] = None):
+        """
+        Specify granular overrides for specific repair actions. These overrides control the repair action and the repair delay time before a node is considered eligible for repair. If you use this, you must specify all the values.
+        :param pulumi.Input[_builtins.int] min_repair_wait_time_mins: Specify the minimum time in minutes to wait before attempting to repair a node with this specific NodeMonitoringCondition and NodeUnhealthyReason.
+        :param pulumi.Input[_builtins.str] node_monitoring_condition: Specify an unhealthy condition reported by the node monitoring agent that this override would apply to.
+        :param pulumi.Input[_builtins.str] node_unhealthy_reason: Specify a reason reported by the node monitoring agent that this override would apply to.
+        :param pulumi.Input['NodegroupNodeRepairConfigOverridesRepairAction'] repair_action: Specify the repair action to take for nodes when all of the specified conditions are met.
+        """
+        if min_repair_wait_time_mins is not None:
+            pulumi.set(__self__, "min_repair_wait_time_mins", min_repair_wait_time_mins)
+        if node_monitoring_condition is not None:
+            pulumi.set(__self__, "node_monitoring_condition", node_monitoring_condition)
+        if node_unhealthy_reason is not None:
+            pulumi.set(__self__, "node_unhealthy_reason", node_unhealthy_reason)
+        if repair_action is not None:
+            pulumi.set(__self__, "repair_action", repair_action)
+
+    @_builtins.property
+    @pulumi.getter(name="minRepairWaitTimeMins")
+    def min_repair_wait_time_mins(self) -> Optional[pulumi.Input[_builtins.int]]:
+        """
+        Specify the minimum time in minutes to wait before attempting to repair a node with this specific NodeMonitoringCondition and NodeUnhealthyReason.
+        """
+        return pulumi.get(self, "min_repair_wait_time_mins")
+
+    @min_repair_wait_time_mins.setter
+    def min_repair_wait_time_mins(self, value: Optional[pulumi.Input[_builtins.int]]):
+        pulumi.set(self, "min_repair_wait_time_mins", value)
+
+    @_builtins.property
+    @pulumi.getter(name="nodeMonitoringCondition")
+    def node_monitoring_condition(self) -> Optional[pulumi.Input[_builtins.str]]:
+        """
+        Specify an unhealthy condition reported by the node monitoring agent that this override would apply to.
+        """
+        return pulumi.get(self, "node_monitoring_condition")
+
+    @node_monitoring_condition.setter
+    def node_monitoring_condition(self, value: Optional[pulumi.Input[_builtins.str]]):
+        pulumi.set(self, "node_monitoring_condition", value)
+
+    @_builtins.property
+    @pulumi.getter(name="nodeUnhealthyReason")
+    def node_unhealthy_reason(self) -> Optional[pulumi.Input[_builtins.str]]:
+        """
+        Specify a reason reported by the node monitoring agent that this override would apply to.
+        """
+        return pulumi.get(self, "node_unhealthy_reason")
+
+    @node_unhealthy_reason.setter
+    def node_unhealthy_reason(self, value: Optional[pulumi.Input[_builtins.str]]):
+        pulumi.set(self, "node_unhealthy_reason", value)
+
+    @_builtins.property
+    @pulumi.getter(name="repairAction")
+    def repair_action(self) -> Optional[pulumi.Input['NodegroupNodeRepairConfigOverridesRepairAction']]:
+        """
+        Specify the repair action to take for nodes when all of the specified conditions are met.
+        """
+        return pulumi.get(self, "repair_action")
+
+    @repair_action.setter
+    def repair_action(self, value: Optional[pulumi.Input['NodegroupNodeRepairConfigOverridesRepairAction']]):
+        pulumi.set(self, "repair_action", value)
+
+
 if not MYPY:
     class NodegroupNodeRepairConfigArgsDict(TypedDict):
         """
@@ -1654,19 +1752,59 @@ if not MYPY:
         """
         Set this value to true to enable node auto repair for the node group.
         """
+        max_parallel_nodes_repaired_count: NotRequired[pulumi.Input[_builtins.int]]
+        """
+        Specify the maximum number of nodes that can be repaired concurrently or in parallel, expressed as a count of unhealthy nodes. This gives you finer-grained control over the pace of node replacements. When using this, you cannot also set MaxParallelNodesRepairedPercentage at the same time.
+        """
+        max_parallel_nodes_repaired_percentage: NotRequired[pulumi.Input[_builtins.int]]
+        """
+        Specify the maximum number of nodes that can be repaired concurrently or in parallel, expressed as a percentage of unhealthy nodes. This gives you finer-grained control over the pace of node replacements. When using this, you cannot also set MaxParallelNodesRepairedCount at the same time.
+        """
+        max_unhealthy_node_threshold_count: NotRequired[pulumi.Input[_builtins.int]]
+        """
+        Specify a count threshold of unhealthy nodes, above which node auto repair actions will stop. When using this, you cannot also set MaxUnhealthyNodeThresholdPercentage at the same time.
+        """
+        max_unhealthy_node_threshold_percentage: NotRequired[pulumi.Input[_builtins.int]]
+        """
+        Specify a percentage threshold of unhealthy nodes, above which node auto repair actions will stop. When using this, you cannot also set MaxUnhealthyNodeThresholdCount at the same time.
+        """
+        node_repair_config_overrides: NotRequired[pulumi.Input[Sequence[pulumi.Input['NodegroupNodeRepairConfigOverridesArgsDict']]]]
+        """
+        Specify granular overrides for specific repair actions. These overrides control the repair action and the repair delay time before a node is considered eligible for repair. If you use this, you must specify all the values.
+        """
 elif False:
     NodegroupNodeRepairConfigArgsDict: TypeAlias = Mapping[str, Any]

 @pulumi.input_type
 class NodegroupNodeRepairConfigArgs:
     def __init__(__self__, *,
-                 enabled: Optional[pulumi.Input[_builtins.bool]] = None):
+                 enabled: Optional[pulumi.Input[_builtins.bool]] = None,
+                 max_parallel_nodes_repaired_count: Optional[pulumi.Input[_builtins.int]] = None,
+                 max_parallel_nodes_repaired_percentage: Optional[pulumi.Input[_builtins.int]] = None,
+                 max_unhealthy_node_threshold_count: Optional[pulumi.Input[_builtins.int]] = None,
+                 max_unhealthy_node_threshold_percentage: Optional[pulumi.Input[_builtins.int]] = None,
+                 node_repair_config_overrides: Optional[pulumi.Input[Sequence[pulumi.Input['NodegroupNodeRepairConfigOverridesArgs']]]] = None):
         """
         The node auto repair configuration for node group.
         :param pulumi.Input[_builtins.bool] enabled: Set this value to true to enable node auto repair for the node group.
+        :param pulumi.Input[_builtins.int] max_parallel_nodes_repaired_count: Specify the maximum number of nodes that can be repaired concurrently or in parallel, expressed as a count of unhealthy nodes. This gives you finer-grained control over the pace of node replacements. When using this, you cannot also set MaxParallelNodesRepairedPercentage at the same time.
+        :param pulumi.Input[_builtins.int] max_parallel_nodes_repaired_percentage: Specify the maximum number of nodes that can be repaired concurrently or in parallel, expressed as a percentage of unhealthy nodes. This gives you finer-grained control over the pace of node replacements. When using this, you cannot also set MaxParallelNodesRepairedCount at the same time.
+        :param pulumi.Input[_builtins.int] max_unhealthy_node_threshold_count: Specify a count threshold of unhealthy nodes, above which node auto repair actions will stop. When using this, you cannot also set MaxUnhealthyNodeThresholdPercentage at the same time.
+        :param pulumi.Input[_builtins.int] max_unhealthy_node_threshold_percentage: Specify a percentage threshold of unhealthy nodes, above which node auto repair actions will stop. When using this, you cannot also set MaxUnhealthyNodeThresholdCount at the same time.
+        :param pulumi.Input[Sequence[pulumi.Input['NodegroupNodeRepairConfigOverridesArgs']]] node_repair_config_overrides: Specify granular overrides for specific repair actions. These overrides control the repair action and the repair delay time before a node is considered eligible for repair. If you use this, you must specify all the values.
         """
         if enabled is not None:
             pulumi.set(__self__, "enabled", enabled)
+        if max_parallel_nodes_repaired_count is not None:
+            pulumi.set(__self__, "max_parallel_nodes_repaired_count", max_parallel_nodes_repaired_count)
+        if max_parallel_nodes_repaired_percentage is not None:
+            pulumi.set(__self__, "max_parallel_nodes_repaired_percentage", max_parallel_nodes_repaired_percentage)
+        if max_unhealthy_node_threshold_count is not None:
+            pulumi.set(__self__, "max_unhealthy_node_threshold_count", max_unhealthy_node_threshold_count)
+        if max_unhealthy_node_threshold_percentage is not None:
+            pulumi.set(__self__, "max_unhealthy_node_threshold_percentage", max_unhealthy_node_threshold_percentage)
+        if node_repair_config_overrides is not None:
+            pulumi.set(__self__, "node_repair_config_overrides", node_repair_config_overrides)

     @_builtins.property
     @pulumi.getter
@@ -1680,6 +1818,66 @@ class NodegroupNodeRepairConfigArgs:
     def enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
         pulumi.set(self, "enabled", value)

+    @_builtins.property
+    @pulumi.getter(name="maxParallelNodesRepairedCount")
+    def max_parallel_nodes_repaired_count(self) -> Optional[pulumi.Input[_builtins.int]]:
+        """
+        Specify the maximum number of nodes that can be repaired concurrently or in parallel, expressed as a count of unhealthy nodes. This gives you finer-grained control over the pace of node replacements. When using this, you cannot also set MaxParallelNodesRepairedPercentage at the same time.
+        """
+        return pulumi.get(self, "max_parallel_nodes_repaired_count")
+
+    @max_parallel_nodes_repaired_count.setter
+    def max_parallel_nodes_repaired_count(self, value: Optional[pulumi.Input[_builtins.int]]):
+        pulumi.set(self, "max_parallel_nodes_repaired_count", value)
+
+    @_builtins.property
+    @pulumi.getter(name="maxParallelNodesRepairedPercentage")
+    def max_parallel_nodes_repaired_percentage(self) -> Optional[pulumi.Input[_builtins.int]]:
+        """
+        Specify the maximum number of nodes that can be repaired concurrently or in parallel, expressed as a percentage of unhealthy nodes. This gives you finer-grained control over the pace of node replacements. When using this, you cannot also set MaxParallelNodesRepairedCount at the same time.
+        """
+        return pulumi.get(self, "max_parallel_nodes_repaired_percentage")
+
+    @max_parallel_nodes_repaired_percentage.setter
+    def max_parallel_nodes_repaired_percentage(self, value: Optional[pulumi.Input[_builtins.int]]):
+        pulumi.set(self, "max_parallel_nodes_repaired_percentage", value)
+
+    @_builtins.property
+    @pulumi.getter(name="maxUnhealthyNodeThresholdCount")
+    def max_unhealthy_node_threshold_count(self) -> Optional[pulumi.Input[_builtins.int]]:
+        """
+        Specify a count threshold of unhealthy nodes, above which node auto repair actions will stop. When using this, you cannot also set MaxUnhealthyNodeThresholdPercentage at the same time.
+        """
+        return pulumi.get(self, "max_unhealthy_node_threshold_count")
+
+    @max_unhealthy_node_threshold_count.setter
+    def max_unhealthy_node_threshold_count(self, value: Optional[pulumi.Input[_builtins.int]]):
+        pulumi.set(self, "max_unhealthy_node_threshold_count", value)
+
+    @_builtins.property
+    @pulumi.getter(name="maxUnhealthyNodeThresholdPercentage")
+    def max_unhealthy_node_threshold_percentage(self) -> Optional[pulumi.Input[_builtins.int]]:
+        """
+        Specify a percentage threshold of unhealthy nodes, above which node auto repair actions will stop. When using this, you cannot also set MaxUnhealthyNodeThresholdCount at the same time.
+        """
+        return pulumi.get(self, "max_unhealthy_node_threshold_percentage")
+
+    @max_unhealthy_node_threshold_percentage.setter
+    def max_unhealthy_node_threshold_percentage(self, value: Optional[pulumi.Input[_builtins.int]]):
+        pulumi.set(self, "max_unhealthy_node_threshold_percentage", value)
+
+    @_builtins.property
+    @pulumi.getter(name="nodeRepairConfigOverrides")
+    def node_repair_config_overrides(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['NodegroupNodeRepairConfigOverridesArgs']]]]:
+        """
+        Specify granular overrides for specific repair actions. These overrides control the repair action and the repair delay time before a node is considered eligible for repair. If you use this, you must specify all the values.
+        """
+        return pulumi.get(self, "node_repair_config_overrides")
+
+    @node_repair_config_overrides.setter
+    def node_repair_config_overrides(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['NodegroupNodeRepairConfigOverridesArgs']]]]):
+        pulumi.set(self, "node_repair_config_overrides", value)
+

 if not MYPY:
     class NodegroupRemoteAccessArgsDict(TypedDict):
pulumi_aws_native/eks/outputs.py
CHANGED
@@ -46,6 +46,7 @@ __all__ = [
     'NamespaceConfigProperties',
     'NodegroupLaunchTemplateSpecification',
     'NodegroupNodeRepairConfig',
+    'NodegroupNodeRepairConfigOverrides',
     'NodegroupRemoteAccess',
     'NodegroupScalingConfig',
     'NodegroupTaint',
@@ -1317,14 +1318,59 @@ class NodegroupNodeRepairConfig(dict):
     """
     The node auto repair configuration for node group.
     """
+    @staticmethod
+    def __key_warning(key: str):
+        suggest = None
+        if key == "maxParallelNodesRepairedCount":
+            suggest = "max_parallel_nodes_repaired_count"
+        elif key == "maxParallelNodesRepairedPercentage":
+            suggest = "max_parallel_nodes_repaired_percentage"
+        elif key == "maxUnhealthyNodeThresholdCount":
+            suggest = "max_unhealthy_node_threshold_count"
+        elif key == "maxUnhealthyNodeThresholdPercentage":
+            suggest = "max_unhealthy_node_threshold_percentage"
+        elif key == "nodeRepairConfigOverrides":
+            suggest = "node_repair_config_overrides"
+
+        if suggest:
+            pulumi.log.warn(f"Key '{key}' not found in NodegroupNodeRepairConfig. Access the value via the '{suggest}' property getter instead.")
+
+    def __getitem__(self, key: str) -> Any:
+        NodegroupNodeRepairConfig.__key_warning(key)
+        return super().__getitem__(key)
+
+    def get(self, key: str, default = None) -> Any:
+        NodegroupNodeRepairConfig.__key_warning(key)
+        return super().get(key, default)
+
     def __init__(__self__, *,
-                 enabled: Optional[_builtins.bool] = None):
+                 enabled: Optional[_builtins.bool] = None,
+                 max_parallel_nodes_repaired_count: Optional[_builtins.int] = None,
+                 max_parallel_nodes_repaired_percentage: Optional[_builtins.int] = None,
+                 max_unhealthy_node_threshold_count: Optional[_builtins.int] = None,
+                 max_unhealthy_node_threshold_percentage: Optional[_builtins.int] = None,
+                 node_repair_config_overrides: Optional[Sequence['outputs.NodegroupNodeRepairConfigOverrides']] = None):
         """
         The node auto repair configuration for node group.
         :param _builtins.bool enabled: Set this value to true to enable node auto repair for the node group.
+        :param _builtins.int max_parallel_nodes_repaired_count: Specify the maximum number of nodes that can be repaired concurrently or in parallel, expressed as a count of unhealthy nodes. This gives you finer-grained control over the pace of node replacements. When using this, you cannot also set MaxParallelNodesRepairedPercentage at the same time.
+        :param _builtins.int max_parallel_nodes_repaired_percentage: Specify the maximum number of nodes that can be repaired concurrently or in parallel, expressed as a percentage of unhealthy nodes. This gives you finer-grained control over the pace of node replacements. When using this, you cannot also set MaxParallelNodesRepairedCount at the same time.
+        :param _builtins.int max_unhealthy_node_threshold_count: Specify a count threshold of unhealthy nodes, above which node auto repair actions will stop. When using this, you cannot also set MaxUnhealthyNodeThresholdPercentage at the same time.
+        :param _builtins.int max_unhealthy_node_threshold_percentage: Specify a percentage threshold of unhealthy nodes, above which node auto repair actions will stop. When using this, you cannot also set MaxUnhealthyNodeThresholdCount at the same time.
+        :param Sequence['NodegroupNodeRepairConfigOverrides'] node_repair_config_overrides: Specify granular overrides for specific repair actions. These overrides control the repair action and the repair delay time before a node is considered eligible for repair. If you use this, you must specify all the values.
         """
         if enabled is not None:
             pulumi.set(__self__, "enabled", enabled)
+        if max_parallel_nodes_repaired_count is not None:
+            pulumi.set(__self__, "max_parallel_nodes_repaired_count", max_parallel_nodes_repaired_count)
+        if max_parallel_nodes_repaired_percentage is not None:
+            pulumi.set(__self__, "max_parallel_nodes_repaired_percentage", max_parallel_nodes_repaired_percentage)
+        if max_unhealthy_node_threshold_count is not None:
+            pulumi.set(__self__, "max_unhealthy_node_threshold_count", max_unhealthy_node_threshold_count)
+        if max_unhealthy_node_threshold_percentage is not None:
+            pulumi.set(__self__, "max_unhealthy_node_threshold_percentage", max_unhealthy_node_threshold_percentage)
+        if node_repair_config_overrides is not None:
+            pulumi.set(__self__, "node_repair_config_overrides", node_repair_config_overrides)

     @_builtins.property
     @pulumi.getter
@@ -1334,6 +1380,128 @@ class NodegroupNodeRepairConfig(dict):
         """
         return pulumi.get(self, "enabled")

+    @_builtins.property
+    @pulumi.getter(name="maxParallelNodesRepairedCount")
+    def max_parallel_nodes_repaired_count(self) -> Optional[_builtins.int]:
+        """
+        Specify the maximum number of nodes that can be repaired concurrently or in parallel, expressed as a count of unhealthy nodes. This gives you finer-grained control over the pace of node replacements. When using this, you cannot also set MaxParallelNodesRepairedPercentage at the same time.
+        """
+        return pulumi.get(self, "max_parallel_nodes_repaired_count")
+
+    @_builtins.property
+    @pulumi.getter(name="maxParallelNodesRepairedPercentage")
+    def max_parallel_nodes_repaired_percentage(self) -> Optional[_builtins.int]:
+        """
+        Specify the maximum number of nodes that can be repaired concurrently or in parallel, expressed as a percentage of unhealthy nodes. This gives you finer-grained control over the pace of node replacements. When using this, you cannot also set MaxParallelNodesRepairedCount at the same time.
+        """
+        return pulumi.get(self, "max_parallel_nodes_repaired_percentage")
+
+    @_builtins.property
+    @pulumi.getter(name="maxUnhealthyNodeThresholdCount")
+    def max_unhealthy_node_threshold_count(self) -> Optional[_builtins.int]:
+        """
+        Specify a count threshold of unhealthy nodes, above which node auto repair actions will stop. When using this, you cannot also set MaxUnhealthyNodeThresholdPercentage at the same time.
+        """
+        return pulumi.get(self, "max_unhealthy_node_threshold_count")
+
+    @_builtins.property
+    @pulumi.getter(name="maxUnhealthyNodeThresholdPercentage")
+    def max_unhealthy_node_threshold_percentage(self) -> Optional[_builtins.int]:
+        """
+        Specify a percentage threshold of unhealthy nodes, above which node auto repair actions will stop. When using this, you cannot also set MaxUnhealthyNodeThresholdCount at the same time.
+        """
+        return pulumi.get(self, "max_unhealthy_node_threshold_percentage")
+
+    @_builtins.property
+    @pulumi.getter(name="nodeRepairConfigOverrides")
+    def node_repair_config_overrides(self) -> Optional[Sequence['outputs.NodegroupNodeRepairConfigOverrides']]:
+        """
+        Specify granular overrides for specific repair actions. These overrides control the repair action and the repair delay time before a node is considered eligible for repair. If you use this, you must specify all the values.
+        """
+        return pulumi.get(self, "node_repair_config_overrides")
+
+
+@pulumi.output_type
+class NodegroupNodeRepairConfigOverrides(dict):
+    """
+    Specify granular overrides for specific repair actions. These overrides control the repair action and the repair delay time before a node is considered eligible for repair. If you use this, you must specify all the values.
+    """
+    @staticmethod
+    def __key_warning(key: str):
+        suggest = None
+        if key == "minRepairWaitTimeMins":
+            suggest = "min_repair_wait_time_mins"
+        elif key == "nodeMonitoringCondition":
+            suggest = "node_monitoring_condition"
+        elif key == "nodeUnhealthyReason":
+            suggest = "node_unhealthy_reason"
+        elif key == "repairAction":
+            suggest = "repair_action"
+
+        if suggest:
+            pulumi.log.warn(f"Key '{key}' not found in NodegroupNodeRepairConfigOverrides. Access the value via the '{suggest}' property getter instead.")
+
+    def __getitem__(self, key: str) -> Any:
+        NodegroupNodeRepairConfigOverrides.__key_warning(key)
+        return super().__getitem__(key)
+
+    def get(self, key: str, default = None) -> Any:
+        NodegroupNodeRepairConfigOverrides.__key_warning(key)
+        return super().get(key, default)
+
+    def __init__(__self__, *,
+                 min_repair_wait_time_mins: Optional[_builtins.int] = None,
+                 node_monitoring_condition: Optional[_builtins.str] = None,
+                 node_unhealthy_reason: Optional[_builtins.str] = None,
+                 repair_action: Optional['NodegroupNodeRepairConfigOverridesRepairAction'] = None):
+        """
+        Specify granular overrides for specific repair actions. These overrides control the repair action and the repair delay time before a node is considered eligible for repair. If you use this, you must specify all the values.
+        :param _builtins.int min_repair_wait_time_mins: Specify the minimum time in minutes to wait before attempting to repair a node with this specific NodeMonitoringCondition and NodeUnhealthyReason.
+        :param _builtins.str node_monitoring_condition: Specify an unhealthy condition reported by the node monitoring agent that this override would apply to.
+        :param _builtins.str node_unhealthy_reason: Specify a reason reported by the node monitoring agent that this override would apply to.
+        :param 'NodegroupNodeRepairConfigOverridesRepairAction' repair_action: Specify the repair action to take for nodes when all of the specified conditions are met.
+        """
+        if min_repair_wait_time_mins is not None:
+            pulumi.set(__self__, "min_repair_wait_time_mins", min_repair_wait_time_mins)
+        if node_monitoring_condition is not None:
+            pulumi.set(__self__, "node_monitoring_condition", node_monitoring_condition)
+        if node_unhealthy_reason is not None:
+            pulumi.set(__self__, "node_unhealthy_reason", node_unhealthy_reason)
+        if repair_action is not None:
+            pulumi.set(__self__, "repair_action", repair_action)
+
+    @_builtins.property
+    @pulumi.getter(name="minRepairWaitTimeMins")
+    def min_repair_wait_time_mins(self) -> Optional[_builtins.int]:
+        """
+        Specify the minimum time in minutes to wait before attempting to repair a node with this specific NodeMonitoringCondition and NodeUnhealthyReason.
+        """
+        return pulumi.get(self, "min_repair_wait_time_mins")
+
+    @_builtins.property
+    @pulumi.getter(name="nodeMonitoringCondition")
+    def node_monitoring_condition(self) -> Optional[_builtins.str]:
+        """
+        Specify an unhealthy condition reported by the node monitoring agent that this override would apply to.
+        """
+        return pulumi.get(self, "node_monitoring_condition")
+
+    @_builtins.property
+    @pulumi.getter(name="nodeUnhealthyReason")
+    def node_unhealthy_reason(self) -> Optional[_builtins.str]:
+        """
+        Specify a reason reported by the node monitoring agent that this override would apply to.
+        """
+        return pulumi.get(self, "node_unhealthy_reason")
+
+    @_builtins.property
+    @pulumi.getter(name="repairAction")
+    def repair_action(self) -> Optional['NodegroupNodeRepairConfigOverridesRepairAction']:
+        """
+        Specify the repair action to take for nodes when all of the specified conditions are met.
+        """
+        return pulumi.get(self, "repair_action")
+

 @pulumi.output_type
 class NodegroupRemoteAccess(dict):
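On the output side the same fields are mirrored as dict-backed classes whose __getitem__/get only warn when a camelCase key is used, so the snake_case property getters are the supported access path. A short sketch, continuing the Nodegroup example above and assuming the resource exposes node_repair_config as an output property (the resource module itself is not shown in this excerpt):

import pulumi

def summarize(cfg):
    # cfg resolves to an eks.outputs.NodegroupNodeRepairConfig or None.
    if cfg is None:
        return "node auto repair not configured"
    # Prefer the snake_case getters; cfg["nodeRepairConfigOverrides"] would trigger
    # the __key_warning shown above rather than return the value.
    overrides = cfg.node_repair_config_overrides or []
    return f"enabled={cfg.enabled}, overrides={len(overrides)}"

pulumi.export("node_repair_summary", nodegroup.node_repair_config.apply(summarize))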
pulumi_aws_native/glue/__init__.py
CHANGED
@@ -11,12 +11,14 @@ from .crawler import *
 from .database import *
 from .get_crawler import *
 from .get_database import *
+from .get_integration_resource_property import *
 from .get_job import *
 from .get_registry import *
 from .get_schema import *
 from .get_schema_version import *
 from .get_trigger import *
 from .get_usage_profile import *
+from .integration_resource_property import *
 from .job import *
 from .registry import *
 from .schema import *
pulumi_aws_native/glue/_inputs.py
CHANGED
@@ -66,6 +66,10 @@ __all__ = [
     'SchemaVersionSchemaArgsDict',
     'SchemaVersionArgs',
     'SchemaVersionArgsDict',
+    'SourceProcessingPropertiesPropertiesArgs',
+    'SourceProcessingPropertiesPropertiesArgsDict',
+    'TargetProcessingPropertiesPropertiesArgs',
+    'TargetProcessingPropertiesPropertiesArgsDict',
     'TriggerActionArgs',
     'TriggerActionArgsDict',
     'TriggerConditionArgs',
@@ -1980,6 +1984,136 @@ class SchemaVersionArgs:
         pulumi.set(self, "version_number", value)


+if not MYPY:
+    class SourceProcessingPropertiesPropertiesArgsDict(TypedDict):
+        """
+        The resource properties associated with the integration source.
+        """
+        role_arn: pulumi.Input[_builtins.str]
+        """
+        The IAM role to access the Glue connection.
+        """
+elif False:
+    SourceProcessingPropertiesPropertiesArgsDict: TypeAlias = Mapping[str, Any]
+
+@pulumi.input_type
+class SourceProcessingPropertiesPropertiesArgs:
+    def __init__(__self__, *,
+                 role_arn: pulumi.Input[_builtins.str]):
+        """
+        The resource properties associated with the integration source.
+        :param pulumi.Input[_builtins.str] role_arn: The IAM role to access the Glue connection.
+        """
+        pulumi.set(__self__, "role_arn", role_arn)
+
+    @_builtins.property
+    @pulumi.getter(name="roleArn")
+    def role_arn(self) -> pulumi.Input[_builtins.str]:
+        """
+        The IAM role to access the Glue connection.
+        """
+        return pulumi.get(self, "role_arn")
+
+    @role_arn.setter
+    def role_arn(self, value: pulumi.Input[_builtins.str]):
+        pulumi.set(self, "role_arn", value)
+
+
+if not MYPY:
+    class TargetProcessingPropertiesPropertiesArgsDict(TypedDict):
+        """
+        The resource properties associated with the integration target.
+        """
+        role_arn: pulumi.Input[_builtins.str]
+        """
+        The IAM role to access the Glue database.
+        """
+        connection_name: NotRequired[pulumi.Input[_builtins.str]]
+        """
+        The Glue network connection to configure the Glue job running in the customer VPC.
+        """
+        event_bus_arn: NotRequired[pulumi.Input[_builtins.str]]
+        """
+        The ARN of an Eventbridge event bus to receive the integration status notification.
+        """
+        kms_arn: NotRequired[pulumi.Input[_builtins.str]]
+        """
+        The ARN of the KMS key used for encryption.
+        """
+elif False:
+    TargetProcessingPropertiesPropertiesArgsDict: TypeAlias = Mapping[str, Any]
+
+@pulumi.input_type
+class TargetProcessingPropertiesPropertiesArgs:
+    def __init__(__self__, *,
+                 role_arn: pulumi.Input[_builtins.str],
+                 connection_name: Optional[pulumi.Input[_builtins.str]] = None,
+                 event_bus_arn: Optional[pulumi.Input[_builtins.str]] = None,
+                 kms_arn: Optional[pulumi.Input[_builtins.str]] = None):
+        """
+        The resource properties associated with the integration target.
+        :param pulumi.Input[_builtins.str] role_arn: The IAM role to access the Glue database.
+        :param pulumi.Input[_builtins.str] connection_name: The Glue network connection to configure the Glue job running in the customer VPC.
+        :param pulumi.Input[_builtins.str] event_bus_arn: The ARN of an Eventbridge event bus to receive the integration status notification.
+        :param pulumi.Input[_builtins.str] kms_arn: The ARN of the KMS key used for encryption.
+        """
+        pulumi.set(__self__, "role_arn", role_arn)
+        if connection_name is not None:
+            pulumi.set(__self__, "connection_name", connection_name)
+        if event_bus_arn is not None:
+            pulumi.set(__self__, "event_bus_arn", event_bus_arn)
+        if kms_arn is not None:
+            pulumi.set(__self__, "kms_arn", kms_arn)
+
+    @_builtins.property
+    @pulumi.getter(name="roleArn")
+    def role_arn(self) -> pulumi.Input[_builtins.str]:
+        """
+        The IAM role to access the Glue database.
+        """
+        return pulumi.get(self, "role_arn")
+
+    @role_arn.setter
+    def role_arn(self, value: pulumi.Input[_builtins.str]):
+        pulumi.set(self, "role_arn", value)
+
+    @_builtins.property
+    @pulumi.getter(name="connectionName")
+    def connection_name(self) -> Optional[pulumi.Input[_builtins.str]]:
+        """
+        The Glue network connection to configure the Glue job running in the customer VPC.
+        """
+        return pulumi.get(self, "connection_name")
+
+    @connection_name.setter
+    def connection_name(self, value: Optional[pulumi.Input[_builtins.str]]):
+        pulumi.set(self, "connection_name", value)
+
+    @_builtins.property
+    @pulumi.getter(name="eventBusArn")
+    def event_bus_arn(self) -> Optional[pulumi.Input[_builtins.str]]:
+        """
+        The ARN of an Eventbridge event bus to receive the integration status notification.
+        """
+        return pulumi.get(self, "event_bus_arn")
+
+    @event_bus_arn.setter
+    def event_bus_arn(self, value: Optional[pulumi.Input[_builtins.str]]):
+        pulumi.set(self, "event_bus_arn", value)
+
+    @_builtins.property
+    @pulumi.getter(name="kmsArn")
+    def kms_arn(self) -> Optional[pulumi.Input[_builtins.str]]:
+        """
+        The ARN of the KMS key used for encryption.
+        """
+        return pulumi.get(self, "kms_arn")
+
+    @kms_arn.setter
+    def kms_arn(self, value: Optional[pulumi.Input[_builtins.str]]):
+        pulumi.set(self, "kms_arn", value)
+
+
 if not MYPY:
     class TriggerActionArgsDict(TypedDict):
         """