pulumi-aws-native 1.38.0a1762150096__py3-none-any.whl → 1.38.0a1762176731__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of pulumi-aws-native might be problematic. See the package's registry page for more details.
- pulumi_aws_native/__init__.py +13 -0
- pulumi_aws_native/batch/_inputs.py +9 -0
- pulumi_aws_native/batch/job_definition.py +8 -0
- pulumi_aws_native/batch/outputs.py +6 -0
- pulumi_aws_native/bedrock/automated_reasoning_policy.py +51 -0
- pulumi_aws_native/bedrock/get_automated_reasoning_policy.py +12 -1
- pulumi_aws_native/ce/_enums.py +3 -0
- pulumi_aws_native/cleanrooms/configured_table.py +4 -4
- pulumi_aws_native/cleanrooms/get_configured_table.py +1 -1
- pulumi_aws_native/connectcampaignsv2/_inputs.py +28 -0
- pulumi_aws_native/connectcampaignsv2/outputs.py +16 -0
- pulumi_aws_native/datazone/connection.py +30 -1
- pulumi_aws_native/ec2/get_volume.py +37 -15
- pulumi_aws_native/ec2/volume.py +115 -74
- pulumi_aws_native/ecs/_enums.py +8 -0
- pulumi_aws_native/ecs/_inputs.py +96 -15
- pulumi_aws_native/ecs/outputs.py +60 -10
- pulumi_aws_native/eks/_enums.py +11 -0
- pulumi_aws_native/eks/_inputs.py +199 -1
- pulumi_aws_native/eks/get_nodegroup.py +1 -0
- pulumi_aws_native/eks/nodegroup.py +1 -0
- pulumi_aws_native/eks/outputs.py +169 -1
- pulumi_aws_native/networkfirewall/_inputs.py +7 -0
- pulumi_aws_native/networkfirewall/firewall.py +3 -0
- pulumi_aws_native/networkfirewall/get_firewall.py +3 -0
- pulumi_aws_native/networkfirewall/outputs.py +4 -0
- pulumi_aws_native/pulumi-plugin.json +1 -1
- pulumi_aws_native/rtbfabric/_enums.py +3 -0
- pulumi_aws_native/rtbfabric/_inputs.py +70 -0
- pulumi_aws_native/rtbfabric/get_link.py +18 -0
- pulumi_aws_native/rtbfabric/get_requester_gateway.py +15 -0
- pulumi_aws_native/rtbfabric/get_responder_gateway.py +30 -0
- pulumi_aws_native/rtbfabric/link.py +51 -0
- pulumi_aws_native/rtbfabric/outputs.py +46 -0
- pulumi_aws_native/rtbfabric/requester_gateway.py +40 -0
- pulumi_aws_native/rtbfabric/responder_gateway.py +80 -0
- pulumi_aws_native/s3/_enums.py +1 -1
- pulumi_aws_native/s3/_inputs.py +11 -5
- pulumi_aws_native/s3/outputs.py +10 -4
- pulumi_aws_native/s3vectors/__init__.py +17 -0
- pulumi_aws_native/s3vectors/_enums.py +39 -0
- pulumi_aws_native/s3vectors/_inputs.py +138 -0
- pulumi_aws_native/s3vectors/get_index.py +79 -0
- pulumi_aws_native/s3vectors/get_vector_bucket.py +79 -0
- pulumi_aws_native/s3vectors/get_vector_bucket_policy.py +69 -0
- pulumi_aws_native/s3vectors/index.py +265 -0
- pulumi_aws_native/s3vectors/outputs.py +129 -0
- pulumi_aws_native/s3vectors/vector_bucket.py +157 -0
- pulumi_aws_native/s3vectors/vector_bucket_policy.py +164 -0
- pulumi_aws_native/sso/_enums.py +1 -1
- pulumi_aws_native/sso/assignment.py +8 -8
- pulumi_aws_native/transfer/_inputs.py +9 -0
- pulumi_aws_native/transfer/connector.py +3 -0
- pulumi_aws_native/transfer/get_connector.py +3 -0
- pulumi_aws_native/transfer/outputs.py +6 -0
- {pulumi_aws_native-1.38.0a1762150096.dist-info → pulumi_aws_native-1.38.0a1762176731.dist-info}/METADATA +1 -1
- {pulumi_aws_native-1.38.0a1762150096.dist-info → pulumi_aws_native-1.38.0a1762176731.dist-info}/RECORD +59 -49
- {pulumi_aws_native-1.38.0a1762150096.dist-info → pulumi_aws_native-1.38.0a1762176731.dist-info}/WHEEL +0 -0
- {pulumi_aws_native-1.38.0a1762150096.dist-info → pulumi_aws_native-1.38.0a1762176731.dist-info}/top_level.txt +0 -0
pulumi_aws_native/eks/_inputs.py
CHANGED
|
@@ -72,6 +72,8 @@ __all__ = [
|
|
|
72
72
|
'NamespaceConfigPropertiesArgsDict',
|
|
73
73
|
'NodegroupLaunchTemplateSpecificationArgs',
|
|
74
74
|
'NodegroupLaunchTemplateSpecificationArgsDict',
|
|
75
|
+
'NodegroupNodeRepairConfigOverridesArgs',
|
|
76
|
+
'NodegroupNodeRepairConfigOverridesArgsDict',
|
|
75
77
|
'NodegroupNodeRepairConfigArgs',
|
|
76
78
|
'NodegroupNodeRepairConfigArgsDict',
|
|
77
79
|
'NodegroupRemoteAccessArgs',
|
|
@@ -1645,6 +1647,102 @@ class NodegroupLaunchTemplateSpecificationArgs:
|
|
|
1645
1647
|
pulumi.set(self, "version", value)
|
|
1646
1648
|
|
|
1647
1649
|
|
|
1650
|
+
if not MYPY:
|
|
1651
|
+
class NodegroupNodeRepairConfigOverridesArgsDict(TypedDict):
|
|
1652
|
+
"""
|
|
1653
|
+
Specify granular overrides for specific repair actions. These overrides control the repair action and the repair delay time before a node is considered eligible for repair. If you use this, you must specify all the values.
|
|
1654
|
+
"""
|
|
1655
|
+
min_repair_wait_time_mins: NotRequired[pulumi.Input[_builtins.int]]
|
|
1656
|
+
"""
|
|
1657
|
+
Specify the minimum time in minutes to wait before attempting to repair a node with this specific NodeMonitoringCondition and NodeUnhealthyReason.
|
|
1658
|
+
"""
|
|
1659
|
+
node_monitoring_condition: NotRequired[pulumi.Input[_builtins.str]]
|
|
1660
|
+
"""
|
|
1661
|
+
Specify an unhealthy condition reported by the node monitoring agent that this override would apply to.
|
|
1662
|
+
"""
|
|
1663
|
+
node_unhealthy_reason: NotRequired[pulumi.Input[_builtins.str]]
|
|
1664
|
+
"""
|
|
1665
|
+
Specify a reason reported by the node monitoring agent that this override would apply to.
|
|
1666
|
+
"""
|
|
1667
|
+
repair_action: NotRequired[pulumi.Input['NodegroupNodeRepairConfigOverridesRepairAction']]
|
|
1668
|
+
"""
|
|
1669
|
+
Specify the repair action to take for nodes when all of the specified conditions are met.
|
|
1670
|
+
"""
|
|
1671
|
+
elif False:
|
|
1672
|
+
NodegroupNodeRepairConfigOverridesArgsDict: TypeAlias = Mapping[str, Any]
|
|
1673
|
+
|
|
1674
|
+
@pulumi.input_type
|
|
1675
|
+
class NodegroupNodeRepairConfigOverridesArgs:
|
|
1676
|
+
def __init__(__self__, *,
|
|
1677
|
+
min_repair_wait_time_mins: Optional[pulumi.Input[_builtins.int]] = None,
|
|
1678
|
+
node_monitoring_condition: Optional[pulumi.Input[_builtins.str]] = None,
|
|
1679
|
+
node_unhealthy_reason: Optional[pulumi.Input[_builtins.str]] = None,
|
|
1680
|
+
repair_action: Optional[pulumi.Input['NodegroupNodeRepairConfigOverridesRepairAction']] = None):
|
|
1681
|
+
"""
|
|
1682
|
+
Specify granular overrides for specific repair actions. These overrides control the repair action and the repair delay time before a node is considered eligible for repair. If you use this, you must specify all the values.
|
|
1683
|
+
:param pulumi.Input[_builtins.int] min_repair_wait_time_mins: Specify the minimum time in minutes to wait before attempting to repair a node with this specific NodeMonitoringCondition and NodeUnhealthyReason.
|
|
1684
|
+
:param pulumi.Input[_builtins.str] node_monitoring_condition: Specify an unhealthy condition reported by the node monitoring agent that this override would apply to.
|
|
1685
|
+
:param pulumi.Input[_builtins.str] node_unhealthy_reason: Specify a reason reported by the node monitoring agent that this override would apply to.
|
|
1686
|
+
:param pulumi.Input['NodegroupNodeRepairConfigOverridesRepairAction'] repair_action: Specify the repair action to take for nodes when all of the specified conditions are met.
|
|
1687
|
+
"""
|
|
1688
|
+
if min_repair_wait_time_mins is not None:
|
|
1689
|
+
pulumi.set(__self__, "min_repair_wait_time_mins", min_repair_wait_time_mins)
|
|
1690
|
+
if node_monitoring_condition is not None:
|
|
1691
|
+
pulumi.set(__self__, "node_monitoring_condition", node_monitoring_condition)
|
|
1692
|
+
if node_unhealthy_reason is not None:
|
|
1693
|
+
pulumi.set(__self__, "node_unhealthy_reason", node_unhealthy_reason)
|
|
1694
|
+
if repair_action is not None:
|
|
1695
|
+
pulumi.set(__self__, "repair_action", repair_action)
|
|
1696
|
+
|
|
1697
|
+
@_builtins.property
|
|
1698
|
+
@pulumi.getter(name="minRepairWaitTimeMins")
|
|
1699
|
+
def min_repair_wait_time_mins(self) -> Optional[pulumi.Input[_builtins.int]]:
|
|
1700
|
+
"""
|
|
1701
|
+
Specify the minimum time in minutes to wait before attempting to repair a node with this specific NodeMonitoringCondition and NodeUnhealthyReason.
|
|
1702
|
+
"""
|
|
1703
|
+
return pulumi.get(self, "min_repair_wait_time_mins")
|
|
1704
|
+
|
|
1705
|
+
@min_repair_wait_time_mins.setter
|
|
1706
|
+
def min_repair_wait_time_mins(self, value: Optional[pulumi.Input[_builtins.int]]):
|
|
1707
|
+
pulumi.set(self, "min_repair_wait_time_mins", value)
|
|
1708
|
+
|
|
1709
|
+
@_builtins.property
|
|
1710
|
+
@pulumi.getter(name="nodeMonitoringCondition")
|
|
1711
|
+
def node_monitoring_condition(self) -> Optional[pulumi.Input[_builtins.str]]:
|
|
1712
|
+
"""
|
|
1713
|
+
Specify an unhealthy condition reported by the node monitoring agent that this override would apply to.
|
|
1714
|
+
"""
|
|
1715
|
+
return pulumi.get(self, "node_monitoring_condition")
|
|
1716
|
+
|
|
1717
|
+
@node_monitoring_condition.setter
|
|
1718
|
+
def node_monitoring_condition(self, value: Optional[pulumi.Input[_builtins.str]]):
|
|
1719
|
+
pulumi.set(self, "node_monitoring_condition", value)
|
|
1720
|
+
|
|
1721
|
+
@_builtins.property
|
|
1722
|
+
@pulumi.getter(name="nodeUnhealthyReason")
|
|
1723
|
+
def node_unhealthy_reason(self) -> Optional[pulumi.Input[_builtins.str]]:
|
|
1724
|
+
"""
|
|
1725
|
+
Specify a reason reported by the node monitoring agent that this override would apply to.
|
|
1726
|
+
"""
|
|
1727
|
+
return pulumi.get(self, "node_unhealthy_reason")
|
|
1728
|
+
|
|
1729
|
+
@node_unhealthy_reason.setter
|
|
1730
|
+
def node_unhealthy_reason(self, value: Optional[pulumi.Input[_builtins.str]]):
|
|
1731
|
+
pulumi.set(self, "node_unhealthy_reason", value)
|
|
1732
|
+
|
|
1733
|
+
@_builtins.property
|
|
1734
|
+
@pulumi.getter(name="repairAction")
|
|
1735
|
+
def repair_action(self) -> Optional[pulumi.Input['NodegroupNodeRepairConfigOverridesRepairAction']]:
|
|
1736
|
+
"""
|
|
1737
|
+
Specify the repair action to take for nodes when all of the specified conditions are met.
|
|
1738
|
+
"""
|
|
1739
|
+
return pulumi.get(self, "repair_action")
|
|
1740
|
+
|
|
1741
|
+
@repair_action.setter
|
|
1742
|
+
def repair_action(self, value: Optional[pulumi.Input['NodegroupNodeRepairConfigOverridesRepairAction']]):
|
|
1743
|
+
pulumi.set(self, "repair_action", value)
|
|
1744
|
+
|
|
1745
|
+
|
|
1648
1746
|
if not MYPY:
|
|
1649
1747
|
class NodegroupNodeRepairConfigArgsDict(TypedDict):
|
|
1650
1748
|
"""
|
|
@@ -1654,19 +1752,59 @@ if not MYPY:
|
|
|
1654
1752
|
"""
|
|
1655
1753
|
Set this value to true to enable node auto repair for the node group.
|
|
1656
1754
|
"""
|
|
1755
|
+
max_parallel_nodes_repaired_count: NotRequired[pulumi.Input[_builtins.int]]
|
|
1756
|
+
"""
|
|
1757
|
+
Specify the maximum number of nodes that can be repaired concurrently or in parallel, expressed as a count of unhealthy nodes. This gives you finer-grained control over the pace of node replacements. When using this, you cannot also set MaxParallelNodesRepairedPercentage at the same time.
|
|
1758
|
+
"""
|
|
1759
|
+
max_parallel_nodes_repaired_percentage: NotRequired[pulumi.Input[_builtins.int]]
|
|
1760
|
+
"""
|
|
1761
|
+
Specify the maximum number of nodes that can be repaired concurrently or in parallel, expressed as a percentage of unhealthy nodes. This gives you finer-grained control over the pace of node replacements. When using this, you cannot also set MaxParallelNodesRepairedCount at the same time.
|
|
1762
|
+
"""
|
|
1763
|
+
max_unhealthy_node_threshold_count: NotRequired[pulumi.Input[_builtins.int]]
|
|
1764
|
+
"""
|
|
1765
|
+
Specify a count threshold of unhealthy nodes, above which node auto repair actions will stop. When using this, you cannot also set MaxUnhealthyNodeThresholdPercentage at the same time.
|
|
1766
|
+
"""
|
|
1767
|
+
max_unhealthy_node_threshold_percentage: NotRequired[pulumi.Input[_builtins.int]]
|
|
1768
|
+
"""
|
|
1769
|
+
Specify a percentage threshold of unhealthy nodes, above which node auto repair actions will stop. When using this, you cannot also set MaxUnhealthyNodeThresholdCount at the same time.
|
|
1770
|
+
"""
|
|
1771
|
+
node_repair_config_overrides: NotRequired[pulumi.Input[Sequence[pulumi.Input['NodegroupNodeRepairConfigOverridesArgsDict']]]]
|
|
1772
|
+
"""
|
|
1773
|
+
Specify granular overrides for specific repair actions. These overrides control the repair action and the repair delay time before a node is considered eligible for repair. If you use this, you must specify all the values.
|
|
1774
|
+
"""
|
|
1657
1775
|
elif False:
|
|
1658
1776
|
NodegroupNodeRepairConfigArgsDict: TypeAlias = Mapping[str, Any]
|
|
1659
1777
|
|
|
1660
1778
|
@pulumi.input_type
|
|
1661
1779
|
class NodegroupNodeRepairConfigArgs:
|
|
1662
1780
|
def __init__(__self__, *,
|
|
1663
|
-
enabled: Optional[pulumi.Input[_builtins.bool]] = None
|
|
1781
|
+
enabled: Optional[pulumi.Input[_builtins.bool]] = None,
|
|
1782
|
+
max_parallel_nodes_repaired_count: Optional[pulumi.Input[_builtins.int]] = None,
|
|
1783
|
+
max_parallel_nodes_repaired_percentage: Optional[pulumi.Input[_builtins.int]] = None,
|
|
1784
|
+
max_unhealthy_node_threshold_count: Optional[pulumi.Input[_builtins.int]] = None,
|
|
1785
|
+
max_unhealthy_node_threshold_percentage: Optional[pulumi.Input[_builtins.int]] = None,
|
|
1786
|
+
node_repair_config_overrides: Optional[pulumi.Input[Sequence[pulumi.Input['NodegroupNodeRepairConfigOverridesArgs']]]] = None):
|
|
1664
1787
|
"""
|
|
1665
1788
|
The node auto repair configuration for node group.
|
|
1666
1789
|
:param pulumi.Input[_builtins.bool] enabled: Set this value to true to enable node auto repair for the node group.
|
|
1790
|
+
:param pulumi.Input[_builtins.int] max_parallel_nodes_repaired_count: Specify the maximum number of nodes that can be repaired concurrently or in parallel, expressed as a count of unhealthy nodes. This gives you finer-grained control over the pace of node replacements. When using this, you cannot also set MaxParallelNodesRepairedPercentage at the same time.
|
|
1791
|
+
:param pulumi.Input[_builtins.int] max_parallel_nodes_repaired_percentage: Specify the maximum number of nodes that can be repaired concurrently or in parallel, expressed as a percentage of unhealthy nodes. This gives you finer-grained control over the pace of node replacements. When using this, you cannot also set MaxParallelNodesRepairedCount at the same time.
|
|
1792
|
+
:param pulumi.Input[_builtins.int] max_unhealthy_node_threshold_count: Specify a count threshold of unhealthy nodes, above which node auto repair actions will stop. When using this, you cannot also set MaxUnhealthyNodeThresholdPercentage at the same time.
|
|
1793
|
+
:param pulumi.Input[_builtins.int] max_unhealthy_node_threshold_percentage: Specify a percentage threshold of unhealthy nodes, above which node auto repair actions will stop. When using this, you cannot also set MaxUnhealthyNodeThresholdCount at the same time.
|
|
1794
|
+
:param pulumi.Input[Sequence[pulumi.Input['NodegroupNodeRepairConfigOverridesArgs']]] node_repair_config_overrides: Specify granular overrides for specific repair actions. These overrides control the repair action and the repair delay time before a node is considered eligible for repair. If you use this, you must specify all the values.
|
|
1667
1795
|
"""
|
|
1668
1796
|
if enabled is not None:
|
|
1669
1797
|
pulumi.set(__self__, "enabled", enabled)
|
|
1798
|
+
if max_parallel_nodes_repaired_count is not None:
|
|
1799
|
+
pulumi.set(__self__, "max_parallel_nodes_repaired_count", max_parallel_nodes_repaired_count)
|
|
1800
|
+
if max_parallel_nodes_repaired_percentage is not None:
|
|
1801
|
+
pulumi.set(__self__, "max_parallel_nodes_repaired_percentage", max_parallel_nodes_repaired_percentage)
|
|
1802
|
+
if max_unhealthy_node_threshold_count is not None:
|
|
1803
|
+
pulumi.set(__self__, "max_unhealthy_node_threshold_count", max_unhealthy_node_threshold_count)
|
|
1804
|
+
if max_unhealthy_node_threshold_percentage is not None:
|
|
1805
|
+
pulumi.set(__self__, "max_unhealthy_node_threshold_percentage", max_unhealthy_node_threshold_percentage)
|
|
1806
|
+
if node_repair_config_overrides is not None:
|
|
1807
|
+
pulumi.set(__self__, "node_repair_config_overrides", node_repair_config_overrides)
|
|
1670
1808
|
|
|
1671
1809
|
@_builtins.property
|
|
1672
1810
|
@pulumi.getter
|
|
@@ -1680,6 +1818,66 @@ class NodegroupNodeRepairConfigArgs:
|
|
|
1680
1818
|
def enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
|
|
1681
1819
|
pulumi.set(self, "enabled", value)
|
|
1682
1820
|
|
|
1821
|
+
@_builtins.property
|
|
1822
|
+
@pulumi.getter(name="maxParallelNodesRepairedCount")
|
|
1823
|
+
def max_parallel_nodes_repaired_count(self) -> Optional[pulumi.Input[_builtins.int]]:
|
|
1824
|
+
"""
|
|
1825
|
+
Specify the maximum number of nodes that can be repaired concurrently or in parallel, expressed as a count of unhealthy nodes. This gives you finer-grained control over the pace of node replacements. When using this, you cannot also set MaxParallelNodesRepairedPercentage at the same time.
|
|
1826
|
+
"""
|
|
1827
|
+
return pulumi.get(self, "max_parallel_nodes_repaired_count")
|
|
1828
|
+
|
|
1829
|
+
@max_parallel_nodes_repaired_count.setter
|
|
1830
|
+
def max_parallel_nodes_repaired_count(self, value: Optional[pulumi.Input[_builtins.int]]):
|
|
1831
|
+
pulumi.set(self, "max_parallel_nodes_repaired_count", value)
|
|
1832
|
+
|
|
1833
|
+
@_builtins.property
|
|
1834
|
+
@pulumi.getter(name="maxParallelNodesRepairedPercentage")
|
|
1835
|
+
def max_parallel_nodes_repaired_percentage(self) -> Optional[pulumi.Input[_builtins.int]]:
|
|
1836
|
+
"""
|
|
1837
|
+
Specify the maximum number of nodes that can be repaired concurrently or in parallel, expressed as a percentage of unhealthy nodes. This gives you finer-grained control over the pace of node replacements. When using this, you cannot also set MaxParallelNodesRepairedCount at the same time.
|
|
1838
|
+
"""
|
|
1839
|
+
return pulumi.get(self, "max_parallel_nodes_repaired_percentage")
|
|
1840
|
+
|
|
1841
|
+
@max_parallel_nodes_repaired_percentage.setter
|
|
1842
|
+
def max_parallel_nodes_repaired_percentage(self, value: Optional[pulumi.Input[_builtins.int]]):
|
|
1843
|
+
pulumi.set(self, "max_parallel_nodes_repaired_percentage", value)
|
|
1844
|
+
|
|
1845
|
+
@_builtins.property
|
|
1846
|
+
@pulumi.getter(name="maxUnhealthyNodeThresholdCount")
|
|
1847
|
+
def max_unhealthy_node_threshold_count(self) -> Optional[pulumi.Input[_builtins.int]]:
|
|
1848
|
+
"""
|
|
1849
|
+
Specify a count threshold of unhealthy nodes, above which node auto repair actions will stop. When using this, you cannot also set MaxUnhealthyNodeThresholdPercentage at the same time.
|
|
1850
|
+
"""
|
|
1851
|
+
return pulumi.get(self, "max_unhealthy_node_threshold_count")
|
|
1852
|
+
|
|
1853
|
+
@max_unhealthy_node_threshold_count.setter
|
|
1854
|
+
def max_unhealthy_node_threshold_count(self, value: Optional[pulumi.Input[_builtins.int]]):
|
|
1855
|
+
pulumi.set(self, "max_unhealthy_node_threshold_count", value)
|
|
1856
|
+
|
|
1857
|
+
@_builtins.property
|
|
1858
|
+
@pulumi.getter(name="maxUnhealthyNodeThresholdPercentage")
|
|
1859
|
+
def max_unhealthy_node_threshold_percentage(self) -> Optional[pulumi.Input[_builtins.int]]:
|
|
1860
|
+
"""
|
|
1861
|
+
Specify a percentage threshold of unhealthy nodes, above which node auto repair actions will stop. When using this, you cannot also set MaxUnhealthyNodeThresholdCount at the same time.
|
|
1862
|
+
"""
|
|
1863
|
+
return pulumi.get(self, "max_unhealthy_node_threshold_percentage")
|
|
1864
|
+
|
|
1865
|
+
@max_unhealthy_node_threshold_percentage.setter
|
|
1866
|
+
def max_unhealthy_node_threshold_percentage(self, value: Optional[pulumi.Input[_builtins.int]]):
|
|
1867
|
+
pulumi.set(self, "max_unhealthy_node_threshold_percentage", value)
|
|
1868
|
+
|
|
1869
|
+
@_builtins.property
|
|
1870
|
+
@pulumi.getter(name="nodeRepairConfigOverrides")
|
|
1871
|
+
def node_repair_config_overrides(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['NodegroupNodeRepairConfigOverridesArgs']]]]:
|
|
1872
|
+
"""
|
|
1873
|
+
Specify granular overrides for specific repair actions. These overrides control the repair action and the repair delay time before a node is considered eligible for repair. If you use this, you must specify all the values.
|
|
1874
|
+
"""
|
|
1875
|
+
return pulumi.get(self, "node_repair_config_overrides")
|
|
1876
|
+
|
|
1877
|
+
@node_repair_config_overrides.setter
|
|
1878
|
+
def node_repair_config_overrides(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['NodegroupNodeRepairConfigOverridesArgs']]]]):
|
|
1879
|
+
pulumi.set(self, "node_repair_config_overrides", value)
|
|
1880
|
+
|
|
1683
1881
|
|
|
1684
1882
|
if not MYPY:
|
|
1685
1883
|
class NodegroupRemoteAccessArgsDict(TypedDict):
|
pulumi_aws_native/eks/outputs.py
CHANGED
|
@@ -46,6 +46,7 @@ __all__ = [
|
|
|
46
46
|
'NamespaceConfigProperties',
|
|
47
47
|
'NodegroupLaunchTemplateSpecification',
|
|
48
48
|
'NodegroupNodeRepairConfig',
|
|
49
|
+
'NodegroupNodeRepairConfigOverrides',
|
|
49
50
|
'NodegroupRemoteAccess',
|
|
50
51
|
'NodegroupScalingConfig',
|
|
51
52
|
'NodegroupTaint',
|
|
@@ -1317,14 +1318,59 @@ class NodegroupNodeRepairConfig(dict):
|
|
|
1317
1318
|
"""
|
|
1318
1319
|
The node auto repair configuration for node group.
|
|
1319
1320
|
"""
|
|
1321
|
+
@staticmethod
|
|
1322
|
+
def __key_warning(key: str):
|
|
1323
|
+
suggest = None
|
|
1324
|
+
if key == "maxParallelNodesRepairedCount":
|
|
1325
|
+
suggest = "max_parallel_nodes_repaired_count"
|
|
1326
|
+
elif key == "maxParallelNodesRepairedPercentage":
|
|
1327
|
+
suggest = "max_parallel_nodes_repaired_percentage"
|
|
1328
|
+
elif key == "maxUnhealthyNodeThresholdCount":
|
|
1329
|
+
suggest = "max_unhealthy_node_threshold_count"
|
|
1330
|
+
elif key == "maxUnhealthyNodeThresholdPercentage":
|
|
1331
|
+
suggest = "max_unhealthy_node_threshold_percentage"
|
|
1332
|
+
elif key == "nodeRepairConfigOverrides":
|
|
1333
|
+
suggest = "node_repair_config_overrides"
|
|
1334
|
+
|
|
1335
|
+
if suggest:
|
|
1336
|
+
pulumi.log.warn(f"Key '{key}' not found in NodegroupNodeRepairConfig. Access the value via the '{suggest}' property getter instead.")
|
|
1337
|
+
|
|
1338
|
+
def __getitem__(self, key: str) -> Any:
|
|
1339
|
+
NodegroupNodeRepairConfig.__key_warning(key)
|
|
1340
|
+
return super().__getitem__(key)
|
|
1341
|
+
|
|
1342
|
+
def get(self, key: str, default = None) -> Any:
|
|
1343
|
+
NodegroupNodeRepairConfig.__key_warning(key)
|
|
1344
|
+
return super().get(key, default)
|
|
1345
|
+
|
|
1320
1346
|
def __init__(__self__, *,
|
|
1321
|
-
enabled: Optional[_builtins.bool] = None
|
|
1347
|
+
enabled: Optional[_builtins.bool] = None,
|
|
1348
|
+
max_parallel_nodes_repaired_count: Optional[_builtins.int] = None,
|
|
1349
|
+
max_parallel_nodes_repaired_percentage: Optional[_builtins.int] = None,
|
|
1350
|
+
max_unhealthy_node_threshold_count: Optional[_builtins.int] = None,
|
|
1351
|
+
max_unhealthy_node_threshold_percentage: Optional[_builtins.int] = None,
|
|
1352
|
+
node_repair_config_overrides: Optional[Sequence['outputs.NodegroupNodeRepairConfigOverrides']] = None):
|
|
1322
1353
|
"""
|
|
1323
1354
|
The node auto repair configuration for node group.
|
|
1324
1355
|
:param _builtins.bool enabled: Set this value to true to enable node auto repair for the node group.
|
|
1356
|
+
:param _builtins.int max_parallel_nodes_repaired_count: Specify the maximum number of nodes that can be repaired concurrently or in parallel, expressed as a count of unhealthy nodes. This gives you finer-grained control over the pace of node replacements. When using this, you cannot also set MaxParallelNodesRepairedPercentage at the same time.
|
|
1357
|
+
:param _builtins.int max_parallel_nodes_repaired_percentage: Specify the maximum number of nodes that can be repaired concurrently or in parallel, expressed as a percentage of unhealthy nodes. This gives you finer-grained control over the pace of node replacements. When using this, you cannot also set MaxParallelNodesRepairedCount at the same time.
|
|
1358
|
+
:param _builtins.int max_unhealthy_node_threshold_count: Specify a count threshold of unhealthy nodes, above which node auto repair actions will stop. When using this, you cannot also set MaxUnhealthyNodeThresholdPercentage at the same time.
|
|
1359
|
+
:param _builtins.int max_unhealthy_node_threshold_percentage: Specify a percentage threshold of unhealthy nodes, above which node auto repair actions will stop. When using this, you cannot also set MaxUnhealthyNodeThresholdCount at the same time.
|
|
1360
|
+
:param Sequence['NodegroupNodeRepairConfigOverrides'] node_repair_config_overrides: Specify granular overrides for specific repair actions. These overrides control the repair action and the repair delay time before a node is considered eligible for repair. If you use this, you must specify all the values.
|
|
1325
1361
|
"""
|
|
1326
1362
|
if enabled is not None:
|
|
1327
1363
|
pulumi.set(__self__, "enabled", enabled)
|
|
1364
|
+
if max_parallel_nodes_repaired_count is not None:
|
|
1365
|
+
pulumi.set(__self__, "max_parallel_nodes_repaired_count", max_parallel_nodes_repaired_count)
|
|
1366
|
+
if max_parallel_nodes_repaired_percentage is not None:
|
|
1367
|
+
pulumi.set(__self__, "max_parallel_nodes_repaired_percentage", max_parallel_nodes_repaired_percentage)
|
|
1368
|
+
if max_unhealthy_node_threshold_count is not None:
|
|
1369
|
+
pulumi.set(__self__, "max_unhealthy_node_threshold_count", max_unhealthy_node_threshold_count)
|
|
1370
|
+
if max_unhealthy_node_threshold_percentage is not None:
|
|
1371
|
+
pulumi.set(__self__, "max_unhealthy_node_threshold_percentage", max_unhealthy_node_threshold_percentage)
|
|
1372
|
+
if node_repair_config_overrides is not None:
|
|
1373
|
+
pulumi.set(__self__, "node_repair_config_overrides", node_repair_config_overrides)
|
|
1328
1374
|
|
|
1329
1375
|
@_builtins.property
|
|
1330
1376
|
@pulumi.getter
|
|
@@ -1334,6 +1380,128 @@ class NodegroupNodeRepairConfig(dict):
|
|
|
1334
1380
|
"""
|
|
1335
1381
|
return pulumi.get(self, "enabled")
|
|
1336
1382
|
|
|
1383
|
+
@_builtins.property
|
|
1384
|
+
@pulumi.getter(name="maxParallelNodesRepairedCount")
|
|
1385
|
+
def max_parallel_nodes_repaired_count(self) -> Optional[_builtins.int]:
|
|
1386
|
+
"""
|
|
1387
|
+
Specify the maximum number of nodes that can be repaired concurrently or in parallel, expressed as a count of unhealthy nodes. This gives you finer-grained control over the pace of node replacements. When using this, you cannot also set MaxParallelNodesRepairedPercentage at the same time.
|
|
1388
|
+
"""
|
|
1389
|
+
return pulumi.get(self, "max_parallel_nodes_repaired_count")
|
|
1390
|
+
|
|
1391
|
+
@_builtins.property
|
|
1392
|
+
@pulumi.getter(name="maxParallelNodesRepairedPercentage")
|
|
1393
|
+
def max_parallel_nodes_repaired_percentage(self) -> Optional[_builtins.int]:
|
|
1394
|
+
"""
|
|
1395
|
+
Specify the maximum number of nodes that can be repaired concurrently or in parallel, expressed as a percentage of unhealthy nodes. This gives you finer-grained control over the pace of node replacements. When using this, you cannot also set MaxParallelNodesRepairedCount at the same time.
|
|
1396
|
+
"""
|
|
1397
|
+
return pulumi.get(self, "max_parallel_nodes_repaired_percentage")
|
|
1398
|
+
|
|
1399
|
+
@_builtins.property
|
|
1400
|
+
@pulumi.getter(name="maxUnhealthyNodeThresholdCount")
|
|
1401
|
+
def max_unhealthy_node_threshold_count(self) -> Optional[_builtins.int]:
|
|
1402
|
+
"""
|
|
1403
|
+
Specify a count threshold of unhealthy nodes, above which node auto repair actions will stop. When using this, you cannot also set MaxUnhealthyNodeThresholdPercentage at the same time.
|
|
1404
|
+
"""
|
|
1405
|
+
return pulumi.get(self, "max_unhealthy_node_threshold_count")
|
|
1406
|
+
|
|
1407
|
+
@_builtins.property
|
|
1408
|
+
@pulumi.getter(name="maxUnhealthyNodeThresholdPercentage")
|
|
1409
|
+
def max_unhealthy_node_threshold_percentage(self) -> Optional[_builtins.int]:
|
|
1410
|
+
"""
|
|
1411
|
+
Specify a percentage threshold of unhealthy nodes, above which node auto repair actions will stop. When using this, you cannot also set MaxUnhealthyNodeThresholdCount at the same time.
|
|
1412
|
+
"""
|
|
1413
|
+
return pulumi.get(self, "max_unhealthy_node_threshold_percentage")
|
|
1414
|
+
|
|
1415
|
+
@_builtins.property
|
|
1416
|
+
@pulumi.getter(name="nodeRepairConfigOverrides")
|
|
1417
|
+
def node_repair_config_overrides(self) -> Optional[Sequence['outputs.NodegroupNodeRepairConfigOverrides']]:
|
|
1418
|
+
"""
|
|
1419
|
+
Specify granular overrides for specific repair actions. These overrides control the repair action and the repair delay time before a node is considered eligible for repair. If you use this, you must specify all the values.
|
|
1420
|
+
"""
|
|
1421
|
+
return pulumi.get(self, "node_repair_config_overrides")
|
|
1422
|
+
|
|
1423
|
+
|
|
1424
|
+
@pulumi.output_type
|
|
1425
|
+
class NodegroupNodeRepairConfigOverrides(dict):
|
|
1426
|
+
"""
|
|
1427
|
+
Specify granular overrides for specific repair actions. These overrides control the repair action and the repair delay time before a node is considered eligible for repair. If you use this, you must specify all the values.
|
|
1428
|
+
"""
|
|
1429
|
+
@staticmethod
|
|
1430
|
+
def __key_warning(key: str):
|
|
1431
|
+
suggest = None
|
|
1432
|
+
if key == "minRepairWaitTimeMins":
|
|
1433
|
+
suggest = "min_repair_wait_time_mins"
|
|
1434
|
+
elif key == "nodeMonitoringCondition":
|
|
1435
|
+
suggest = "node_monitoring_condition"
|
|
1436
|
+
elif key == "nodeUnhealthyReason":
|
|
1437
|
+
suggest = "node_unhealthy_reason"
|
|
1438
|
+
elif key == "repairAction":
|
|
1439
|
+
suggest = "repair_action"
|
|
1440
|
+
|
|
1441
|
+
if suggest:
|
|
1442
|
+
pulumi.log.warn(f"Key '{key}' not found in NodegroupNodeRepairConfigOverrides. Access the value via the '{suggest}' property getter instead.")
|
|
1443
|
+
|
|
1444
|
+
def __getitem__(self, key: str) -> Any:
|
|
1445
|
+
NodegroupNodeRepairConfigOverrides.__key_warning(key)
|
|
1446
|
+
return super().__getitem__(key)
|
|
1447
|
+
|
|
1448
|
+
def get(self, key: str, default = None) -> Any:
|
|
1449
|
+
NodegroupNodeRepairConfigOverrides.__key_warning(key)
|
|
1450
|
+
return super().get(key, default)
|
|
1451
|
+
|
|
1452
|
+
def __init__(__self__, *,
|
|
1453
|
+
min_repair_wait_time_mins: Optional[_builtins.int] = None,
|
|
1454
|
+
node_monitoring_condition: Optional[_builtins.str] = None,
|
|
1455
|
+
node_unhealthy_reason: Optional[_builtins.str] = None,
|
|
1456
|
+
repair_action: Optional['NodegroupNodeRepairConfigOverridesRepairAction'] = None):
|
|
1457
|
+
"""
|
|
1458
|
+
Specify granular overrides for specific repair actions. These overrides control the repair action and the repair delay time before a node is considered eligible for repair. If you use this, you must specify all the values.
|
|
1459
|
+
:param _builtins.int min_repair_wait_time_mins: Specify the minimum time in minutes to wait before attempting to repair a node with this specific NodeMonitoringCondition and NodeUnhealthyReason.
|
|
1460
|
+
:param _builtins.str node_monitoring_condition: Specify an unhealthy condition reported by the node monitoring agent that this override would apply to.
|
|
1461
|
+
:param _builtins.str node_unhealthy_reason: Specify a reason reported by the node monitoring agent that this override would apply to.
|
|
1462
|
+
:param 'NodegroupNodeRepairConfigOverridesRepairAction' repair_action: Specify the repair action to take for nodes when all of the specified conditions are met.
|
|
1463
|
+
"""
|
|
1464
|
+
if min_repair_wait_time_mins is not None:
|
|
1465
|
+
pulumi.set(__self__, "min_repair_wait_time_mins", min_repair_wait_time_mins)
|
|
1466
|
+
if node_monitoring_condition is not None:
|
|
1467
|
+
pulumi.set(__self__, "node_monitoring_condition", node_monitoring_condition)
|
|
1468
|
+
if node_unhealthy_reason is not None:
|
|
1469
|
+
pulumi.set(__self__, "node_unhealthy_reason", node_unhealthy_reason)
|
|
1470
|
+
if repair_action is not None:
|
|
1471
|
+
pulumi.set(__self__, "repair_action", repair_action)
|
|
1472
|
+
|
|
1473
|
+
@_builtins.property
|
|
1474
|
+
@pulumi.getter(name="minRepairWaitTimeMins")
|
|
1475
|
+
def min_repair_wait_time_mins(self) -> Optional[_builtins.int]:
|
|
1476
|
+
"""
|
|
1477
|
+
Specify the minimum time in minutes to wait before attempting to repair a node with this specific NodeMonitoringCondition and NodeUnhealthyReason.
|
|
1478
|
+
"""
|
|
1479
|
+
return pulumi.get(self, "min_repair_wait_time_mins")
|
|
1480
|
+
|
|
1481
|
+
@_builtins.property
|
|
1482
|
+
@pulumi.getter(name="nodeMonitoringCondition")
|
|
1483
|
+
def node_monitoring_condition(self) -> Optional[_builtins.str]:
|
|
1484
|
+
"""
|
|
1485
|
+
Specify an unhealthy condition reported by the node monitoring agent that this override would apply to.
|
|
1486
|
+
"""
|
|
1487
|
+
return pulumi.get(self, "node_monitoring_condition")
|
|
1488
|
+
|
|
1489
|
+
@_builtins.property
|
|
1490
|
+
@pulumi.getter(name="nodeUnhealthyReason")
|
|
1491
|
+
def node_unhealthy_reason(self) -> Optional[_builtins.str]:
|
|
1492
|
+
"""
|
|
1493
|
+
Specify a reason reported by the node monitoring agent that this override would apply to.
|
|
1494
|
+
"""
|
|
1495
|
+
return pulumi.get(self, "node_unhealthy_reason")
|
|
1496
|
+
|
|
1497
|
+
@_builtins.property
|
|
1498
|
+
@pulumi.getter(name="repairAction")
|
|
1499
|
+
def repair_action(self) -> Optional['NodegroupNodeRepairConfigOverridesRepairAction']:
|
|
1500
|
+
"""
|
|
1501
|
+
Specify the repair action to take for nodes when all of the specified conditions are met.
|
|
1502
|
+
"""
|
|
1503
|
+
return pulumi.get(self, "repair_action")
|
|
1504
|
+
|
|
1337
1505
|
|
|
1338
1506
|
@pulumi.output_type
|
|
1339
1507
|
class NodegroupRemoteAccess(dict):
|
|
@@ -672,6 +672,9 @@ if not MYPY:
|
|
|
672
672
|
For example, you could specify `["aws:pass"]` or you could specify `["aws:pass", "customActionName"]` . For information about compatibility, see the custom action descriptions.
|
|
673
673
|
"""
|
|
674
674
|
enable_tls_session_holding: NotRequired[pulumi.Input[_builtins.bool]]
|
|
675
|
+
"""
|
|
676
|
+
When true, prevents TCP and TLS packets from reaching destination servers until TLS Inspection has evaluated Server Name Indication (SNI) rules. Requires an associated TLS Inspection configuration.
|
|
677
|
+
"""
|
|
675
678
|
policy_variables: NotRequired[pulumi.Input['FirewallPolicyPolicyVariablesPropertiesArgsDict']]
|
|
676
679
|
"""
|
|
677
680
|
Contains variables that you can use to override default Suricata settings in your firewall policy.
|
|
@@ -736,6 +739,7 @@ class FirewallPolicyArgs:
|
|
|
736
739
|
You must specify one of the standard actions: `aws:pass` , `aws:drop` , or `aws:forward_to_sfe` . In addition, you can specify custom actions that are compatible with your standard section choice.
|
|
737
740
|
|
|
738
741
|
For example, you could specify `["aws:pass"]` or you could specify `["aws:pass", "customActionName"]` . For information about compatibility, see the custom action descriptions.
|
|
742
|
+
:param pulumi.Input[_builtins.bool] enable_tls_session_holding: When true, prevents TCP and TLS packets from reaching destination servers until TLS Inspection has evaluated Server Name Indication (SNI) rules. Requires an associated TLS Inspection configuration.
|
|
739
743
|
:param pulumi.Input['FirewallPolicyPolicyVariablesPropertiesArgs'] policy_variables: Contains variables that you can use to override default Suricata settings in your firewall policy.
|
|
740
744
|
:param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] stateful_default_actions: The default actions to take on a packet that doesn't match any stateful rules. The stateful default action is optional, and is only valid when using the strict rule order.
|
|
741
745
|
|
|
@@ -807,6 +811,9 @@ class FirewallPolicyArgs:
|
|
|
807
811
|
@_builtins.property
|
|
808
812
|
@pulumi.getter(name="enableTlsSessionHolding")
|
|
809
813
|
def enable_tls_session_holding(self) -> Optional[pulumi.Input[_builtins.bool]]:
|
|
814
|
+
"""
|
|
815
|
+
When true, prevents TCP and TLS packets from reaching destination servers until TLS Inspection has evaluated Server Name Indication (SNI) rules. Requires an associated TLS Inspection configuration.
|
|
816
|
+
"""
|
|
810
817
|
return pulumi.get(self, "enable_tls_session_holding")
|
|
811
818
|
|
|
812
819
|
@enable_tls_session_holding.setter
|
|
@@ -530,6 +530,9 @@ class Firewall(pulumi.CustomResource):
|
|
|
530
530
|
@_builtins.property
|
|
531
531
|
@pulumi.getter(name="transitGatewayAttachmentId")
|
|
532
532
|
def transit_gateway_attachment_id(self) -> pulumi.Output[_builtins.str]:
|
|
533
|
+
"""
|
|
534
|
+
The unique identifier of the transit gateway attachment associated with this firewall. This field is only present for transit gateway-attached firewalls.
|
|
535
|
+
"""
|
|
533
536
|
return pulumi.get(self, "transit_gateway_attachment_id")
|
|
534
537
|
|
|
535
538
|
@_builtins.property
|
|
@@ -188,6 +188,9 @@ class GetFirewallResult:
|
|
|
188
188
|
@_builtins.property
|
|
189
189
|
@pulumi.getter(name="transitGatewayAttachmentId")
|
|
190
190
|
def transit_gateway_attachment_id(self) -> Optional[_builtins.str]:
|
|
191
|
+
"""
|
|
192
|
+
The unique identifier of the transit gateway attachment associated with this firewall. This field is only present for transit gateway-attached firewalls.
|
|
193
|
+
"""
|
|
191
194
|
return pulumi.get(self, "transit_gateway_attachment_id")
|
|
192
195
|
|
|
193
196
|
@_builtins.property
|
|
@@ -161,6 +161,7 @@ class FirewallPolicy(dict):
|
|
|
161
161
|
You must specify one of the standard actions: `aws:pass` , `aws:drop` , or `aws:forward_to_sfe` . In addition, you can specify custom actions that are compatible with your standard section choice.
|
|
162
162
|
|
|
163
163
|
For example, you could specify `["aws:pass"]` or you could specify `["aws:pass", "customActionName"]` . For information about compatibility, see the custom action descriptions.
|
|
164
|
+
:param _builtins.bool enable_tls_session_holding: When true, prevents TCP and TLS packets from reaching destination servers until TLS Inspection has evaluated Server Name Indication (SNI) rules. Requires an associated TLS Inspection configuration.
|
|
164
165
|
:param 'FirewallPolicyPolicyVariablesProperties' policy_variables: Contains variables that you can use to override default Suricata settings in your firewall policy.
|
|
165
166
|
:param Sequence[_builtins.str] stateful_default_actions: The default actions to take on a packet that doesn't match any stateful rules. The stateful default action is optional, and is only valid when using the strict rule order.
|
|
166
167
|
|
|
@@ -224,6 +225,9 @@ class FirewallPolicy(dict):
|
|
|
224
225
|
@_builtins.property
|
|
225
226
|
@pulumi.getter(name="enableTlsSessionHolding")
|
|
226
227
|
def enable_tls_session_holding(self) -> Optional[_builtins.bool]:
|
|
228
|
+
"""
|
|
229
|
+
When true, prevents TCP and TLS packets from reaching destination servers until TLS Inspection has evaluated Server Name Indication (SNI) rules. Requires an associated TLS Inspection configuration.
|
|
230
|
+
"""
|
|
227
231
|
return pulumi.get(self, "enable_tls_session_holding")
|
|
228
232
|
|
|
229
233
|
@_builtins.property
|
|
@@ -25,6 +25,9 @@ class LinkDirection(_builtins.str, Enum):
|
|
|
25
25
|
|
|
26
26
|
@pulumi.type_token("aws-native:rtbfabric:LinkResponderErrorMaskingForHttpCodeAction")
|
|
27
27
|
class LinkResponderErrorMaskingForHttpCodeAction(_builtins.str, Enum):
|
|
28
|
+
"""
|
|
29
|
+
The action for the error.
|
|
30
|
+
"""
|
|
28
31
|
NO_BID = "NO_BID"
|
|
29
32
|
PASSTHROUGH = "PASSTHROUGH"
|
|
30
33
|
|