pulumi-kubernetes 4.23.0a1746131759__py3-none-any.whl → 4.23.0a1746153578__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pulumi_kubernetes/__init__.py +36 -2
- pulumi_kubernetes/admissionregistration/v1/MutatingWebhookConfiguration.py +1 -3
- pulumi_kubernetes/admissionregistration/v1/MutatingWebhookConfigurationList.py +1 -3
- pulumi_kubernetes/admissionregistration/v1/MutatingWebhookConfigurationPatch.py +1 -3
- pulumi_kubernetes/admissionregistration/v1/ValidatingAdmissionPolicy.py +1 -3
- pulumi_kubernetes/admissionregistration/v1/ValidatingAdmissionPolicyBinding.py +1 -3
- pulumi_kubernetes/admissionregistration/v1/ValidatingAdmissionPolicyBindingList.py +1 -3
- pulumi_kubernetes/admissionregistration/v1/ValidatingAdmissionPolicyBindingPatch.py +1 -3
- pulumi_kubernetes/admissionregistration/v1/ValidatingAdmissionPolicyList.py +1 -3
- pulumi_kubernetes/admissionregistration/v1/ValidatingAdmissionPolicyPatch.py +1 -3
- pulumi_kubernetes/admissionregistration/v1/ValidatingWebhookConfiguration.py +1 -3
- pulumi_kubernetes/admissionregistration/v1/ValidatingWebhookConfigurationList.py +1 -3
- pulumi_kubernetes/admissionregistration/v1/ValidatingWebhookConfigurationPatch.py +1 -3
- pulumi_kubernetes/admissionregistration/v1alpha1/MutatingAdmissionPolicy.py +1 -3
- pulumi_kubernetes/admissionregistration/v1alpha1/MutatingAdmissionPolicyBinding.py +1 -3
- pulumi_kubernetes/admissionregistration/v1alpha1/MutatingAdmissionPolicyBindingList.py +1 -3
- pulumi_kubernetes/admissionregistration/v1alpha1/MutatingAdmissionPolicyBindingPatch.py +1 -3
- pulumi_kubernetes/admissionregistration/v1alpha1/MutatingAdmissionPolicyList.py +1 -3
- pulumi_kubernetes/admissionregistration/v1alpha1/MutatingAdmissionPolicyPatch.py +1 -3
- pulumi_kubernetes/admissionregistration/v1alpha1/ValidatingAdmissionPolicy.py +1 -3
- pulumi_kubernetes/admissionregistration/v1alpha1/ValidatingAdmissionPolicyBinding.py +1 -3
- pulumi_kubernetes/admissionregistration/v1alpha1/ValidatingAdmissionPolicyBindingList.py +1 -3
- pulumi_kubernetes/admissionregistration/v1alpha1/ValidatingAdmissionPolicyBindingPatch.py +1 -3
- pulumi_kubernetes/admissionregistration/v1alpha1/ValidatingAdmissionPolicyList.py +1 -3
- pulumi_kubernetes/admissionregistration/v1alpha1/ValidatingAdmissionPolicyPatch.py +1 -3
- pulumi_kubernetes/admissionregistration/v1alpha1/_inputs.py +30 -30
- pulumi_kubernetes/admissionregistration/v1alpha1/outputs.py +20 -20
- pulumi_kubernetes/admissionregistration/v1beta1/MutatingWebhookConfiguration.py +1 -3
- pulumi_kubernetes/admissionregistration/v1beta1/MutatingWebhookConfigurationList.py +1 -3
- pulumi_kubernetes/admissionregistration/v1beta1/MutatingWebhookConfigurationPatch.py +1 -3
- pulumi_kubernetes/admissionregistration/v1beta1/ValidatingAdmissionPolicy.py +1 -3
- pulumi_kubernetes/admissionregistration/v1beta1/ValidatingAdmissionPolicyBinding.py +1 -3
- pulumi_kubernetes/admissionregistration/v1beta1/ValidatingAdmissionPolicyBindingList.py +1 -3
- pulumi_kubernetes/admissionregistration/v1beta1/ValidatingAdmissionPolicyBindingPatch.py +1 -3
- pulumi_kubernetes/admissionregistration/v1beta1/ValidatingAdmissionPolicyList.py +1 -3
- pulumi_kubernetes/admissionregistration/v1beta1/ValidatingAdmissionPolicyPatch.py +1 -3
- pulumi_kubernetes/admissionregistration/v1beta1/ValidatingWebhookConfiguration.py +1 -3
- pulumi_kubernetes/admissionregistration/v1beta1/ValidatingWebhookConfigurationList.py +1 -3
- pulumi_kubernetes/admissionregistration/v1beta1/ValidatingWebhookConfigurationPatch.py +1 -3
- pulumi_kubernetes/apiextensions/v1/CustomResourceDefinition.py +1 -3
- pulumi_kubernetes/apiextensions/v1/CustomResourceDefinitionList.py +1 -3
- pulumi_kubernetes/apiextensions/v1/CustomResourceDefinitionPatch.py +1 -3
- pulumi_kubernetes/apiextensions/v1beta1/CustomResourceDefinition.py +1 -3
- pulumi_kubernetes/apiextensions/v1beta1/CustomResourceDefinitionList.py +1 -3
- pulumi_kubernetes/apiextensions/v1beta1/CustomResourceDefinitionPatch.py +1 -3
- pulumi_kubernetes/apiregistration/v1/APIService.py +1 -3
- pulumi_kubernetes/apiregistration/v1/APIServiceList.py +1 -3
- pulumi_kubernetes/apiregistration/v1/APIServicePatch.py +1 -3
- pulumi_kubernetes/apiregistration/v1beta1/APIService.py +1 -3
- pulumi_kubernetes/apiregistration/v1beta1/APIServiceList.py +1 -3
- pulumi_kubernetes/apiregistration/v1beta1/APIServicePatch.py +1 -3
- pulumi_kubernetes/apps/v1/ControllerRevision.py +1 -3
- pulumi_kubernetes/apps/v1/ControllerRevisionList.py +1 -3
- pulumi_kubernetes/apps/v1/ControllerRevisionPatch.py +1 -3
- pulumi_kubernetes/apps/v1/DaemonSet.py +1 -3
- pulumi_kubernetes/apps/v1/DaemonSetList.py +1 -3
- pulumi_kubernetes/apps/v1/DaemonSetPatch.py +1 -3
- pulumi_kubernetes/apps/v1/Deployment.py +1 -3
- pulumi_kubernetes/apps/v1/DeploymentList.py +1 -3
- pulumi_kubernetes/apps/v1/DeploymentPatch.py +1 -3
- pulumi_kubernetes/apps/v1/ReplicaSet.py +1 -3
- pulumi_kubernetes/apps/v1/ReplicaSetList.py +5 -7
- pulumi_kubernetes/apps/v1/ReplicaSetPatch.py +1 -3
- pulumi_kubernetes/apps/v1/StatefulSet.py +1 -3
- pulumi_kubernetes/apps/v1/StatefulSetList.py +1 -3
- pulumi_kubernetes/apps/v1/StatefulSetPatch.py +1 -3
- pulumi_kubernetes/apps/v1/_inputs.py +109 -56
- pulumi_kubernetes/apps/v1/outputs.py +129 -56
- pulumi_kubernetes/apps/v1beta1/ControllerRevision.py +1 -3
- pulumi_kubernetes/apps/v1beta1/ControllerRevisionList.py +1 -3
- pulumi_kubernetes/apps/v1beta1/ControllerRevisionPatch.py +1 -3
- pulumi_kubernetes/apps/v1beta1/Deployment.py +1 -3
- pulumi_kubernetes/apps/v1beta1/DeploymentList.py +1 -3
- pulumi_kubernetes/apps/v1beta1/DeploymentPatch.py +1 -3
- pulumi_kubernetes/apps/v1beta1/StatefulSet.py +1 -3
- pulumi_kubernetes/apps/v1beta1/StatefulSetList.py +1 -3
- pulumi_kubernetes/apps/v1beta1/StatefulSetPatch.py +1 -3
- pulumi_kubernetes/apps/v1beta2/ControllerRevision.py +1 -3
- pulumi_kubernetes/apps/v1beta2/ControllerRevisionList.py +1 -3
- pulumi_kubernetes/apps/v1beta2/ControllerRevisionPatch.py +1 -3
- pulumi_kubernetes/apps/v1beta2/DaemonSet.py +1 -3
- pulumi_kubernetes/apps/v1beta2/DaemonSetList.py +1 -3
- pulumi_kubernetes/apps/v1beta2/DaemonSetPatch.py +1 -3
- pulumi_kubernetes/apps/v1beta2/Deployment.py +1 -3
- pulumi_kubernetes/apps/v1beta2/DeploymentList.py +1 -3
- pulumi_kubernetes/apps/v1beta2/DeploymentPatch.py +1 -3
- pulumi_kubernetes/apps/v1beta2/ReplicaSet.py +1 -3
- pulumi_kubernetes/apps/v1beta2/ReplicaSetList.py +1 -3
- pulumi_kubernetes/apps/v1beta2/ReplicaSetPatch.py +1 -3
- pulumi_kubernetes/apps/v1beta2/StatefulSet.py +1 -3
- pulumi_kubernetes/apps/v1beta2/StatefulSetList.py +1 -3
- pulumi_kubernetes/apps/v1beta2/StatefulSetPatch.py +1 -3
- pulumi_kubernetes/auditregistration/v1alpha1/AuditSink.py +1 -3
- pulumi_kubernetes/auditregistration/v1alpha1/AuditSinkList.py +1 -3
- pulumi_kubernetes/auditregistration/v1alpha1/AuditSinkPatch.py +1 -3
- pulumi_kubernetes/autoscaling/v1/HorizontalPodAutoscaler.py +1 -3
- pulumi_kubernetes/autoscaling/v1/HorizontalPodAutoscalerList.py +1 -3
- pulumi_kubernetes/autoscaling/v1/HorizontalPodAutoscalerPatch.py +1 -3
- pulumi_kubernetes/autoscaling/v2/HorizontalPodAutoscaler.py +1 -3
- pulumi_kubernetes/autoscaling/v2/HorizontalPodAutoscalerList.py +1 -3
- pulumi_kubernetes/autoscaling/v2/HorizontalPodAutoscalerPatch.py +1 -3
- pulumi_kubernetes/autoscaling/v2/_inputs.py +92 -12
- pulumi_kubernetes/autoscaling/v2/outputs.py +66 -10
- pulumi_kubernetes/autoscaling/v2beta1/HorizontalPodAutoscaler.py +1 -3
- pulumi_kubernetes/autoscaling/v2beta1/HorizontalPodAutoscalerList.py +1 -3
- pulumi_kubernetes/autoscaling/v2beta1/HorizontalPodAutoscalerPatch.py +1 -3
- pulumi_kubernetes/autoscaling/v2beta2/HorizontalPodAutoscaler.py +1 -3
- pulumi_kubernetes/autoscaling/v2beta2/HorizontalPodAutoscalerList.py +1 -3
- pulumi_kubernetes/autoscaling/v2beta2/HorizontalPodAutoscalerPatch.py +1 -3
- pulumi_kubernetes/batch/v1/CronJob.py +1 -3
- pulumi_kubernetes/batch/v1/CronJobList.py +1 -3
- pulumi_kubernetes/batch/v1/CronJobPatch.py +1 -3
- pulumi_kubernetes/batch/v1/Job.py +1 -3
- pulumi_kubernetes/batch/v1/JobList.py +1 -3
- pulumi_kubernetes/batch/v1/JobPatch.py +1 -3
- pulumi_kubernetes/batch/v1/_inputs.py +12 -42
- pulumi_kubernetes/batch/v1/outputs.py +8 -32
- pulumi_kubernetes/batch/v1beta1/CronJob.py +1 -3
- pulumi_kubernetes/batch/v1beta1/CronJobList.py +1 -3
- pulumi_kubernetes/batch/v1beta1/CronJobPatch.py +1 -3
- pulumi_kubernetes/batch/v2alpha1/CronJob.py +1 -3
- pulumi_kubernetes/batch/v2alpha1/CronJobList.py +1 -3
- pulumi_kubernetes/batch/v2alpha1/CronJobPatch.py +1 -3
- pulumi_kubernetes/certificates/v1/CertificateSigningRequest.py +1 -3
- pulumi_kubernetes/certificates/v1/CertificateSigningRequestList.py +1 -3
- pulumi_kubernetes/certificates/v1/CertificateSigningRequestPatch.py +1 -3
- pulumi_kubernetes/certificates/v1alpha1/ClusterTrustBundle.py +3 -3
- pulumi_kubernetes/certificates/v1alpha1/ClusterTrustBundleList.py +1 -3
- pulumi_kubernetes/certificates/v1alpha1/ClusterTrustBundlePatch.py +3 -3
- pulumi_kubernetes/certificates/v1beta1/CertificateSigningRequest.py +1 -3
- pulumi_kubernetes/certificates/v1beta1/CertificateSigningRequestList.py +1 -3
- pulumi_kubernetes/certificates/v1beta1/CertificateSigningRequestPatch.py +1 -3
- pulumi_kubernetes/certificates/v1beta1/ClusterTrustBundle.py +227 -0
- pulumi_kubernetes/certificates/v1beta1/ClusterTrustBundleList.py +217 -0
- pulumi_kubernetes/certificates/v1beta1/ClusterTrustBundlePatch.py +238 -0
- pulumi_kubernetes/certificates/v1beta1/__init__.py +3 -0
- pulumi_kubernetes/certificates/v1beta1/_inputs.py +292 -0
- pulumi_kubernetes/certificates/v1beta1/outputs.py +241 -0
- pulumi_kubernetes/coordination/v1/Lease.py +1 -3
- pulumi_kubernetes/coordination/v1/LeaseList.py +1 -3
- pulumi_kubernetes/coordination/v1/LeasePatch.py +1 -3
- pulumi_kubernetes/coordination/v1alpha1/LeaseCandidate.py +2 -4
- pulumi_kubernetes/coordination/v1alpha1/LeaseCandidateList.py +1 -3
- pulumi_kubernetes/coordination/v1alpha1/LeaseCandidatePatch.py +2 -4
- pulumi_kubernetes/coordination/v1alpha2/LeaseCandidate.py +2 -4
- pulumi_kubernetes/coordination/v1alpha2/LeaseCandidateList.py +1 -3
- pulumi_kubernetes/coordination/v1alpha2/LeaseCandidatePatch.py +2 -4
- pulumi_kubernetes/coordination/v1alpha2/_inputs.py +6 -6
- pulumi_kubernetes/coordination/v1alpha2/outputs.py +4 -4
- pulumi_kubernetes/coordination/v1beta1/Lease.py +1 -3
- pulumi_kubernetes/coordination/v1beta1/LeaseCandidate.py +218 -0
- pulumi_kubernetes/coordination/v1beta1/LeaseCandidateList.py +217 -0
- pulumi_kubernetes/coordination/v1beta1/LeaseCandidatePatch.py +230 -0
- pulumi_kubernetes/coordination/v1beta1/LeaseList.py +1 -3
- pulumi_kubernetes/coordination/v1beta1/LeasePatch.py +1 -3
- pulumi_kubernetes/coordination/v1beta1/__init__.py +3 -0
- pulumi_kubernetes/coordination/v1beta1/_inputs.py +371 -0
- pulumi_kubernetes/coordination/v1beta1/outputs.py +292 -0
- pulumi_kubernetes/core/v1/Binding.py +1 -3
- pulumi_kubernetes/core/v1/BindingPatch.py +1 -3
- pulumi_kubernetes/core/v1/ConfigMap.py +1 -3
- pulumi_kubernetes/core/v1/ConfigMapList.py +1 -3
- pulumi_kubernetes/core/v1/ConfigMapPatch.py +1 -3
- pulumi_kubernetes/core/v1/Endpoints.py +9 -3
- pulumi_kubernetes/core/v1/EndpointsList.py +3 -5
- pulumi_kubernetes/core/v1/EndpointsPatch.py +9 -3
- pulumi_kubernetes/core/v1/Event.py +1 -3
- pulumi_kubernetes/core/v1/EventList.py +1 -3
- pulumi_kubernetes/core/v1/EventPatch.py +1 -3
- pulumi_kubernetes/core/v1/LimitRange.py +1 -3
- pulumi_kubernetes/core/v1/LimitRangeList.py +1 -3
- pulumi_kubernetes/core/v1/LimitRangePatch.py +1 -3
- pulumi_kubernetes/core/v1/Namespace.py +1 -3
- pulumi_kubernetes/core/v1/NamespaceList.py +1 -3
- pulumi_kubernetes/core/v1/NamespacePatch.py +1 -3
- pulumi_kubernetes/core/v1/Node.py +1 -3
- pulumi_kubernetes/core/v1/NodeList.py +1 -3
- pulumi_kubernetes/core/v1/NodePatch.py +1 -3
- pulumi_kubernetes/core/v1/PersistentVolume.py +1 -3
- pulumi_kubernetes/core/v1/PersistentVolumeClaim.py +1 -3
- pulumi_kubernetes/core/v1/PersistentVolumeClaimList.py +1 -3
- pulumi_kubernetes/core/v1/PersistentVolumeClaimPatch.py +1 -3
- pulumi_kubernetes/core/v1/PersistentVolumeList.py +1 -3
- pulumi_kubernetes/core/v1/PersistentVolumePatch.py +1 -3
- pulumi_kubernetes/core/v1/Pod.py +1 -3
- pulumi_kubernetes/core/v1/PodList.py +1 -3
- pulumi_kubernetes/core/v1/PodPatch.py +1 -3
- pulumi_kubernetes/core/v1/PodTemplate.py +1 -3
- pulumi_kubernetes/core/v1/PodTemplateList.py +1 -3
- pulumi_kubernetes/core/v1/PodTemplatePatch.py +1 -3
- pulumi_kubernetes/core/v1/ReplicationController.py +1 -3
- pulumi_kubernetes/core/v1/ReplicationControllerList.py +1 -3
- pulumi_kubernetes/core/v1/ReplicationControllerPatch.py +1 -3
- pulumi_kubernetes/core/v1/ResourceQuota.py +1 -3
- pulumi_kubernetes/core/v1/ResourceQuotaList.py +1 -3
- pulumi_kubernetes/core/v1/ResourceQuotaPatch.py +1 -3
- pulumi_kubernetes/core/v1/Secret.py +1 -3
- pulumi_kubernetes/core/v1/SecretList.py +1 -3
- pulumi_kubernetes/core/v1/SecretPatch.py +1 -3
- pulumi_kubernetes/core/v1/Service.py +1 -3
- pulumi_kubernetes/core/v1/ServiceAccount.py +1 -3
- pulumi_kubernetes/core/v1/ServiceAccountList.py +1 -3
- pulumi_kubernetes/core/v1/ServiceAccountPatch.py +1 -3
- pulumi_kubernetes/core/v1/ServiceList.py +1 -3
- pulumi_kubernetes/core/v1/ServicePatch.py +1 -3
- pulumi_kubernetes/core/v1/_enums.py +2 -1
- pulumi_kubernetes/core/v1/_inputs.py +240 -66
- pulumi_kubernetes/core/v1/outputs.py +251 -51
- pulumi_kubernetes/discovery/v1/EndpointSlice.py +11 -13
- pulumi_kubernetes/discovery/v1/EndpointSliceList.py +1 -3
- pulumi_kubernetes/discovery/v1/EndpointSlicePatch.py +11 -13
- pulumi_kubernetes/discovery/v1/_inputs.py +159 -44
- pulumi_kubernetes/discovery/v1/outputs.py +107 -32
- pulumi_kubernetes/discovery/v1beta1/EndpointSlice.py +1 -3
- pulumi_kubernetes/discovery/v1beta1/EndpointSliceList.py +1 -3
- pulumi_kubernetes/discovery/v1beta1/EndpointSlicePatch.py +1 -3
- pulumi_kubernetes/events/v1/Event.py +1 -3
- pulumi_kubernetes/events/v1/EventList.py +1 -3
- pulumi_kubernetes/events/v1/EventPatch.py +1 -3
- pulumi_kubernetes/events/v1beta1/Event.py +1 -3
- pulumi_kubernetes/events/v1beta1/EventList.py +1 -3
- pulumi_kubernetes/events/v1beta1/EventPatch.py +1 -3
- pulumi_kubernetes/extensions/v1beta1/DaemonSet.py +1 -3
- pulumi_kubernetes/extensions/v1beta1/DaemonSetList.py +1 -3
- pulumi_kubernetes/extensions/v1beta1/DaemonSetPatch.py +1 -3
- pulumi_kubernetes/extensions/v1beta1/Deployment.py +1 -3
- pulumi_kubernetes/extensions/v1beta1/DeploymentList.py +1 -3
- pulumi_kubernetes/extensions/v1beta1/DeploymentPatch.py +1 -3
- pulumi_kubernetes/extensions/v1beta1/Ingress.py +1 -3
- pulumi_kubernetes/extensions/v1beta1/IngressList.py +1 -3
- pulumi_kubernetes/extensions/v1beta1/IngressPatch.py +1 -3
- pulumi_kubernetes/extensions/v1beta1/NetworkPolicy.py +1 -3
- pulumi_kubernetes/extensions/v1beta1/NetworkPolicyList.py +1 -3
- pulumi_kubernetes/extensions/v1beta1/NetworkPolicyPatch.py +1 -3
- pulumi_kubernetes/extensions/v1beta1/PodSecurityPolicy.py +1 -3
- pulumi_kubernetes/extensions/v1beta1/PodSecurityPolicyList.py +1 -3
- pulumi_kubernetes/extensions/v1beta1/PodSecurityPolicyPatch.py +1 -3
- pulumi_kubernetes/extensions/v1beta1/ReplicaSet.py +1 -3
- pulumi_kubernetes/extensions/v1beta1/ReplicaSetList.py +1 -3
- pulumi_kubernetes/extensions/v1beta1/ReplicaSetPatch.py +1 -3
- pulumi_kubernetes/flowcontrol/v1/FlowSchema.py +1 -3
- pulumi_kubernetes/flowcontrol/v1/FlowSchemaList.py +1 -3
- pulumi_kubernetes/flowcontrol/v1/FlowSchemaPatch.py +1 -3
- pulumi_kubernetes/flowcontrol/v1/PriorityLevelConfiguration.py +1 -3
- pulumi_kubernetes/flowcontrol/v1/PriorityLevelConfigurationList.py +1 -3
- pulumi_kubernetes/flowcontrol/v1/PriorityLevelConfigurationPatch.py +1 -3
- pulumi_kubernetes/flowcontrol/v1alpha1/FlowSchema.py +1 -3
- pulumi_kubernetes/flowcontrol/v1alpha1/FlowSchemaList.py +1 -3
- pulumi_kubernetes/flowcontrol/v1alpha1/FlowSchemaPatch.py +1 -3
- pulumi_kubernetes/flowcontrol/v1alpha1/PriorityLevelConfiguration.py +1 -3
- pulumi_kubernetes/flowcontrol/v1alpha1/PriorityLevelConfigurationList.py +1 -3
- pulumi_kubernetes/flowcontrol/v1alpha1/PriorityLevelConfigurationPatch.py +1 -3
- pulumi_kubernetes/flowcontrol/v1beta1/FlowSchema.py +1 -3
- pulumi_kubernetes/flowcontrol/v1beta1/FlowSchemaList.py +1 -3
- pulumi_kubernetes/flowcontrol/v1beta1/FlowSchemaPatch.py +1 -3
- pulumi_kubernetes/flowcontrol/v1beta1/PriorityLevelConfiguration.py +1 -3
- pulumi_kubernetes/flowcontrol/v1beta1/PriorityLevelConfigurationList.py +1 -3
- pulumi_kubernetes/flowcontrol/v1beta1/PriorityLevelConfigurationPatch.py +1 -3
- pulumi_kubernetes/flowcontrol/v1beta2/FlowSchema.py +1 -3
- pulumi_kubernetes/flowcontrol/v1beta2/FlowSchemaList.py +1 -3
- pulumi_kubernetes/flowcontrol/v1beta2/FlowSchemaPatch.py +1 -3
- pulumi_kubernetes/flowcontrol/v1beta2/PriorityLevelConfiguration.py +1 -3
- pulumi_kubernetes/flowcontrol/v1beta2/PriorityLevelConfigurationList.py +1 -3
- pulumi_kubernetes/flowcontrol/v1beta2/PriorityLevelConfigurationPatch.py +1 -3
- pulumi_kubernetes/flowcontrol/v1beta3/FlowSchema.py +1 -3
- pulumi_kubernetes/flowcontrol/v1beta3/FlowSchemaList.py +1 -3
- pulumi_kubernetes/flowcontrol/v1beta3/FlowSchemaPatch.py +1 -3
- pulumi_kubernetes/flowcontrol/v1beta3/PriorityLevelConfiguration.py +1 -3
- pulumi_kubernetes/flowcontrol/v1beta3/PriorityLevelConfigurationList.py +1 -3
- pulumi_kubernetes/flowcontrol/v1beta3/PriorityLevelConfigurationPatch.py +1 -3
- pulumi_kubernetes/helm/v3/Release.py +1 -3
- pulumi_kubernetes/helm/v4/Chart.py +1 -3
- pulumi_kubernetes/kustomize/v2/Directory.py +1 -3
- pulumi_kubernetes/meta/v1/Status.py +1 -3
- pulumi_kubernetes/meta/v1/StatusPatch.py +1 -3
- pulumi_kubernetes/networking/v1/IPAddress.py +218 -0
- pulumi_kubernetes/networking/v1/IPAddressList.py +217 -0
- pulumi_kubernetes/networking/v1/IPAddressPatch.py +230 -0
- pulumi_kubernetes/networking/v1/Ingress.py +1 -3
- pulumi_kubernetes/networking/v1/IngressClass.py +1 -3
- pulumi_kubernetes/networking/v1/IngressClassList.py +1 -3
- pulumi_kubernetes/networking/v1/IngressClassPatch.py +1 -3
- pulumi_kubernetes/networking/v1/IngressList.py +1 -3
- pulumi_kubernetes/networking/v1/IngressPatch.py +1 -3
- pulumi_kubernetes/networking/v1/NetworkPolicy.py +1 -3
- pulumi_kubernetes/networking/v1/NetworkPolicyList.py +1 -3
- pulumi_kubernetes/networking/v1/NetworkPolicyPatch.py +1 -3
- pulumi_kubernetes/networking/v1/ServiceCIDR.py +228 -0
- pulumi_kubernetes/networking/v1/ServiceCIDRList.py +217 -0
- pulumi_kubernetes/networking/v1/ServiceCIDRPatch.py +240 -0
- pulumi_kubernetes/networking/v1/__init__.py +6 -0
- pulumi_kubernetes/networking/v1/_inputs.py +599 -0
- pulumi_kubernetes/networking/v1/outputs.py +461 -0
- pulumi_kubernetes/networking/v1alpha1/ClusterCIDR.py +1 -3
- pulumi_kubernetes/networking/v1alpha1/ClusterCIDRList.py +1 -3
- pulumi_kubernetes/networking/v1alpha1/ClusterCIDRPatch.py +1 -3
- pulumi_kubernetes/networking/v1alpha1/IPAddress.py +2 -4
- pulumi_kubernetes/networking/v1alpha1/IPAddressList.py +1 -3
- pulumi_kubernetes/networking/v1alpha1/IPAddressPatch.py +2 -4
- pulumi_kubernetes/networking/v1alpha1/ServiceCIDR.py +2 -4
- pulumi_kubernetes/networking/v1alpha1/ServiceCIDRList.py +1 -3
- pulumi_kubernetes/networking/v1alpha1/ServiceCIDRPatch.py +2 -4
- pulumi_kubernetes/networking/v1beta1/IPAddress.py +2 -4
- pulumi_kubernetes/networking/v1beta1/IPAddressList.py +1 -3
- pulumi_kubernetes/networking/v1beta1/IPAddressPatch.py +2 -4
- pulumi_kubernetes/networking/v1beta1/Ingress.py +1 -3
- pulumi_kubernetes/networking/v1beta1/IngressClass.py +1 -3
- pulumi_kubernetes/networking/v1beta1/IngressClassList.py +1 -3
- pulumi_kubernetes/networking/v1beta1/IngressClassPatch.py +1 -3
- pulumi_kubernetes/networking/v1beta1/IngressList.py +1 -3
- pulumi_kubernetes/networking/v1beta1/IngressPatch.py +1 -3
- pulumi_kubernetes/networking/v1beta1/ServiceCIDR.py +2 -4
- pulumi_kubernetes/networking/v1beta1/ServiceCIDRList.py +1 -3
- pulumi_kubernetes/networking/v1beta1/ServiceCIDRPatch.py +2 -4
- pulumi_kubernetes/node/v1/RuntimeClass.py +1 -3
- pulumi_kubernetes/node/v1/RuntimeClassList.py +1 -3
- pulumi_kubernetes/node/v1/RuntimeClassPatch.py +1 -3
- pulumi_kubernetes/node/v1alpha1/RuntimeClass.py +1 -3
- pulumi_kubernetes/node/v1alpha1/RuntimeClassList.py +1 -3
- pulumi_kubernetes/node/v1alpha1/RuntimeClassPatch.py +1 -3
- pulumi_kubernetes/node/v1beta1/RuntimeClass.py +1 -3
- pulumi_kubernetes/node/v1beta1/RuntimeClassList.py +1 -3
- pulumi_kubernetes/node/v1beta1/RuntimeClassPatch.py +1 -3
- pulumi_kubernetes/policy/v1/PodDisruptionBudget.py +1 -3
- pulumi_kubernetes/policy/v1/PodDisruptionBudgetList.py +1 -3
- pulumi_kubernetes/policy/v1/PodDisruptionBudgetPatch.py +1 -3
- pulumi_kubernetes/policy/v1/_inputs.py +0 -12
- pulumi_kubernetes/policy/v1/outputs.py +0 -8
- pulumi_kubernetes/policy/v1beta1/PodDisruptionBudget.py +1 -3
- pulumi_kubernetes/policy/v1beta1/PodDisruptionBudgetList.py +1 -3
- pulumi_kubernetes/policy/v1beta1/PodDisruptionBudgetPatch.py +1 -3
- pulumi_kubernetes/policy/v1beta1/PodSecurityPolicy.py +1 -3
- pulumi_kubernetes/policy/v1beta1/PodSecurityPolicyList.py +1 -3
- pulumi_kubernetes/policy/v1beta1/PodSecurityPolicyPatch.py +1 -3
- pulumi_kubernetes/provider.py +1 -3
- pulumi_kubernetes/pulumi-plugin.json +1 -1
- pulumi_kubernetes/rbac/v1/ClusterRole.py +1 -3
- pulumi_kubernetes/rbac/v1/ClusterRoleBinding.py +1 -3
- pulumi_kubernetes/rbac/v1/ClusterRoleBindingList.py +1 -3
- pulumi_kubernetes/rbac/v1/ClusterRoleBindingPatch.py +1 -3
- pulumi_kubernetes/rbac/v1/ClusterRoleList.py +1 -3
- pulumi_kubernetes/rbac/v1/ClusterRolePatch.py +1 -3
- pulumi_kubernetes/rbac/v1/Role.py +1 -3
- pulumi_kubernetes/rbac/v1/RoleBinding.py +1 -3
- pulumi_kubernetes/rbac/v1/RoleBindingList.py +1 -3
- pulumi_kubernetes/rbac/v1/RoleBindingPatch.py +1 -3
- pulumi_kubernetes/rbac/v1/RoleList.py +1 -3
- pulumi_kubernetes/rbac/v1/RolePatch.py +1 -3
- pulumi_kubernetes/rbac/v1alpha1/ClusterRole.py +1 -3
- pulumi_kubernetes/rbac/v1alpha1/ClusterRoleBinding.py +1 -3
- pulumi_kubernetes/rbac/v1alpha1/ClusterRoleBindingList.py +1 -3
- pulumi_kubernetes/rbac/v1alpha1/ClusterRoleBindingPatch.py +1 -3
- pulumi_kubernetes/rbac/v1alpha1/ClusterRoleList.py +1 -3
- pulumi_kubernetes/rbac/v1alpha1/ClusterRolePatch.py +1 -3
- pulumi_kubernetes/rbac/v1alpha1/Role.py +1 -3
- pulumi_kubernetes/rbac/v1alpha1/RoleBinding.py +1 -3
- pulumi_kubernetes/rbac/v1alpha1/RoleBindingList.py +1 -3
- pulumi_kubernetes/rbac/v1alpha1/RoleBindingPatch.py +1 -3
- pulumi_kubernetes/rbac/v1alpha1/RoleList.py +1 -3
- pulumi_kubernetes/rbac/v1alpha1/RolePatch.py +1 -3
- pulumi_kubernetes/rbac/v1beta1/ClusterRole.py +1 -3
- pulumi_kubernetes/rbac/v1beta1/ClusterRoleBinding.py +1 -3
- pulumi_kubernetes/rbac/v1beta1/ClusterRoleBindingList.py +1 -3
- pulumi_kubernetes/rbac/v1beta1/ClusterRoleBindingPatch.py +1 -3
- pulumi_kubernetes/rbac/v1beta1/ClusterRoleList.py +1 -3
- pulumi_kubernetes/rbac/v1beta1/ClusterRolePatch.py +1 -3
- pulumi_kubernetes/rbac/v1beta1/Role.py +1 -3
- pulumi_kubernetes/rbac/v1beta1/RoleBinding.py +1 -3
- pulumi_kubernetes/rbac/v1beta1/RoleBindingList.py +1 -3
- pulumi_kubernetes/rbac/v1beta1/RoleBindingPatch.py +1 -3
- pulumi_kubernetes/rbac/v1beta1/RoleList.py +1 -3
- pulumi_kubernetes/rbac/v1beta1/RolePatch.py +1 -3
- pulumi_kubernetes/resource/__init__.py +3 -0
- pulumi_kubernetes/resource/v1alpha1/PodScheduling.py +1 -3
- pulumi_kubernetes/resource/v1alpha1/PodSchedulingList.py +1 -3
- pulumi_kubernetes/resource/v1alpha1/PodSchedulingPatch.py +1 -3
- pulumi_kubernetes/resource/v1alpha1/ResourceClaim.py +2 -4
- pulumi_kubernetes/resource/v1alpha1/ResourceClaimList.py +1 -3
- pulumi_kubernetes/resource/v1alpha1/ResourceClaimPatch.py +2 -4
- pulumi_kubernetes/resource/v1alpha1/ResourceClaimTemplate.py +2 -4
- pulumi_kubernetes/resource/v1alpha1/ResourceClaimTemplateList.py +1 -3
- pulumi_kubernetes/resource/v1alpha1/ResourceClaimTemplatePatch.py +2 -4
- pulumi_kubernetes/resource/v1alpha1/ResourceClass.py +1 -3
- pulumi_kubernetes/resource/v1alpha1/ResourceClassList.py +1 -3
- pulumi_kubernetes/resource/v1alpha1/ResourceClassPatch.py +1 -3
- pulumi_kubernetes/resource/v1alpha2/PodSchedulingContext.py +1 -3
- pulumi_kubernetes/resource/v1alpha2/PodSchedulingContextList.py +1 -3
- pulumi_kubernetes/resource/v1alpha2/PodSchedulingContextPatch.py +1 -3
- pulumi_kubernetes/resource/v1alpha2/ResourceClaim.py +2 -4
- pulumi_kubernetes/resource/v1alpha2/ResourceClaimList.py +1 -3
- pulumi_kubernetes/resource/v1alpha2/ResourceClaimParameters.py +1 -3
- pulumi_kubernetes/resource/v1alpha2/ResourceClaimParametersList.py +1 -3
- pulumi_kubernetes/resource/v1alpha2/ResourceClaimParametersPatch.py +1 -3
- pulumi_kubernetes/resource/v1alpha2/ResourceClaimPatch.py +2 -4
- pulumi_kubernetes/resource/v1alpha2/ResourceClaimTemplate.py +2 -4
- pulumi_kubernetes/resource/v1alpha2/ResourceClaimTemplateList.py +1 -3
- pulumi_kubernetes/resource/v1alpha2/ResourceClaimTemplatePatch.py +2 -4
- pulumi_kubernetes/resource/v1alpha2/ResourceClass.py +1 -3
- pulumi_kubernetes/resource/v1alpha2/ResourceClassList.py +1 -3
- pulumi_kubernetes/resource/v1alpha2/ResourceClassParameters.py +1 -3
- pulumi_kubernetes/resource/v1alpha2/ResourceClassParametersList.py +1 -3
- pulumi_kubernetes/resource/v1alpha2/ResourceClassParametersPatch.py +1 -3
- pulumi_kubernetes/resource/v1alpha2/ResourceClassPatch.py +1 -3
- pulumi_kubernetes/resource/v1alpha2/ResourceSlice.py +2 -4
- pulumi_kubernetes/resource/v1alpha2/ResourceSliceList.py +1 -3
- pulumi_kubernetes/resource/v1alpha2/ResourceSlicePatch.py +2 -4
- pulumi_kubernetes/resource/v1alpha3/DeviceClass.py +2 -4
- pulumi_kubernetes/resource/v1alpha3/DeviceClassList.py +1 -3
- pulumi_kubernetes/resource/v1alpha3/DeviceClassPatch.py +2 -4
- pulumi_kubernetes/resource/v1alpha3/DeviceTaintRule.py +225 -0
- pulumi_kubernetes/resource/v1alpha3/DeviceTaintRuleList.py +217 -0
- pulumi_kubernetes/resource/v1alpha3/DeviceTaintRulePatch.py +236 -0
- pulumi_kubernetes/resource/v1alpha3/PodSchedulingContext.py +1 -3
- pulumi_kubernetes/resource/v1alpha3/PodSchedulingContextList.py +1 -3
- pulumi_kubernetes/resource/v1alpha3/PodSchedulingContextPatch.py +1 -3
- pulumi_kubernetes/resource/v1alpha3/ResourceClaim.py +2 -4
- pulumi_kubernetes/resource/v1alpha3/ResourceClaimList.py +1 -3
- pulumi_kubernetes/resource/v1alpha3/ResourceClaimPatch.py +2 -4
- pulumi_kubernetes/resource/v1alpha3/ResourceClaimTemplate.py +2 -4
- pulumi_kubernetes/resource/v1alpha3/ResourceClaimTemplateList.py +1 -3
- pulumi_kubernetes/resource/v1alpha3/ResourceClaimTemplatePatch.py +2 -4
- pulumi_kubernetes/resource/v1alpha3/ResourceSlice.py +2 -4
- pulumi_kubernetes/resource/v1alpha3/ResourceSliceList.py +1 -3
- pulumi_kubernetes/resource/v1alpha3/ResourceSlicePatch.py +2 -4
- pulumi_kubernetes/resource/v1alpha3/__init__.py +3 -0
- pulumi_kubernetes/resource/v1alpha3/_inputs.py +2559 -213
- pulumi_kubernetes/resource/v1alpha3/outputs.py +2037 -256
- pulumi_kubernetes/resource/v1beta1/DeviceClass.py +2 -4
- pulumi_kubernetes/resource/v1beta1/DeviceClassList.py +1 -3
- pulumi_kubernetes/resource/v1beta1/DeviceClassPatch.py +2 -4
- pulumi_kubernetes/resource/v1beta1/ResourceClaim.py +2 -4
- pulumi_kubernetes/resource/v1beta1/ResourceClaimList.py +1 -3
- pulumi_kubernetes/resource/v1beta1/ResourceClaimPatch.py +2 -4
- pulumi_kubernetes/resource/v1beta1/ResourceClaimTemplate.py +2 -4
- pulumi_kubernetes/resource/v1beta1/ResourceClaimTemplateList.py +1 -3
- pulumi_kubernetes/resource/v1beta1/ResourceClaimTemplatePatch.py +2 -4
- pulumi_kubernetes/resource/v1beta1/ResourceSlice.py +2 -4
- pulumi_kubernetes/resource/v1beta1/ResourceSliceList.py +1 -3
- pulumi_kubernetes/resource/v1beta1/ResourceSlicePatch.py +2 -4
- pulumi_kubernetes/resource/v1beta1/_inputs.py +2044 -176
- pulumi_kubernetes/resource/v1beta1/outputs.py +1536 -134
- pulumi_kubernetes/resource/v1beta2/DeviceClass.py +239 -0
- pulumi_kubernetes/resource/v1beta2/DeviceClassList.py +217 -0
- pulumi_kubernetes/resource/v1beta2/DeviceClassPatch.py +250 -0
- pulumi_kubernetes/resource/v1beta2/ResourceClaim.py +234 -0
- pulumi_kubernetes/resource/v1beta2/ResourceClaimList.py +218 -0
- pulumi_kubernetes/resource/v1beta2/ResourceClaimPatch.py +245 -0
- pulumi_kubernetes/resource/v1beta2/ResourceClaimTemplate.py +231 -0
- pulumi_kubernetes/resource/v1beta2/ResourceClaimTemplateList.py +217 -0
- pulumi_kubernetes/resource/v1beta2/ResourceClaimTemplatePatch.py +242 -0
- pulumi_kubernetes/resource/v1beta2/ResourceSlice.py +248 -0
- pulumi_kubernetes/resource/v1beta2/ResourceSliceList.py +218 -0
- pulumi_kubernetes/resource/v1beta2/ResourceSlicePatch.py +259 -0
- pulumi_kubernetes/resource/v1beta2/__init__.py +22 -0
- pulumi_kubernetes/resource/v1beta2/_inputs.py +5681 -0
- pulumi_kubernetes/resource/v1beta2/outputs.py +4726 -0
- pulumi_kubernetes/scheduling/v1/PriorityClass.py +1 -3
- pulumi_kubernetes/scheduling/v1/PriorityClassList.py +1 -3
- pulumi_kubernetes/scheduling/v1/PriorityClassPatch.py +1 -3
- pulumi_kubernetes/scheduling/v1alpha1/PriorityClass.py +1 -3
- pulumi_kubernetes/scheduling/v1alpha1/PriorityClassList.py +1 -3
- pulumi_kubernetes/scheduling/v1alpha1/PriorityClassPatch.py +1 -3
- pulumi_kubernetes/scheduling/v1beta1/PriorityClass.py +1 -3
- pulumi_kubernetes/scheduling/v1beta1/PriorityClassList.py +1 -3
- pulumi_kubernetes/scheduling/v1beta1/PriorityClassPatch.py +1 -3
- pulumi_kubernetes/settings/v1alpha1/PodPreset.py +1 -3
- pulumi_kubernetes/settings/v1alpha1/PodPresetList.py +1 -3
- pulumi_kubernetes/settings/v1alpha1/PodPresetPatch.py +1 -3
- pulumi_kubernetes/storage/v1/CSIDriver.py +1 -3
- pulumi_kubernetes/storage/v1/CSIDriverList.py +1 -3
- pulumi_kubernetes/storage/v1/CSIDriverPatch.py +1 -3
- pulumi_kubernetes/storage/v1/CSINode.py +1 -3
- pulumi_kubernetes/storage/v1/CSINodeList.py +1 -3
- pulumi_kubernetes/storage/v1/CSINodePatch.py +1 -3
- pulumi_kubernetes/storage/v1/CSIStorageCapacity.py +1 -3
- pulumi_kubernetes/storage/v1/CSIStorageCapacityList.py +1 -3
- pulumi_kubernetes/storage/v1/CSIStorageCapacityPatch.py +1 -3
- pulumi_kubernetes/storage/v1/StorageClass.py +1 -3
- pulumi_kubernetes/storage/v1/StorageClassList.py +1 -3
- pulumi_kubernetes/storage/v1/StorageClassPatch.py +1 -3
- pulumi_kubernetes/storage/v1/VolumeAttachment.py +1 -3
- pulumi_kubernetes/storage/v1/VolumeAttachmentList.py +1 -3
- pulumi_kubernetes/storage/v1/VolumeAttachmentPatch.py +1 -3
- pulumi_kubernetes/storage/v1/_inputs.py +90 -0
- pulumi_kubernetes/storage/v1/outputs.py +110 -0
- pulumi_kubernetes/storage/v1alpha1/VolumeAttachment.py +1 -3
- pulumi_kubernetes/storage/v1alpha1/VolumeAttachmentList.py +1 -3
- pulumi_kubernetes/storage/v1alpha1/VolumeAttachmentPatch.py +1 -3
- pulumi_kubernetes/storage/v1alpha1/VolumeAttributesClass.py +1 -3
- pulumi_kubernetes/storage/v1alpha1/VolumeAttributesClassList.py +1 -3
- pulumi_kubernetes/storage/v1alpha1/VolumeAttributesClassPatch.py +1 -3
- pulumi_kubernetes/storage/v1beta1/CSIDriver.py +1 -3
- pulumi_kubernetes/storage/v1beta1/CSIDriverList.py +1 -3
- pulumi_kubernetes/storage/v1beta1/CSIDriverPatch.py +1 -3
- pulumi_kubernetes/storage/v1beta1/CSINode.py +1 -3
- pulumi_kubernetes/storage/v1beta1/CSINodeList.py +1 -3
- pulumi_kubernetes/storage/v1beta1/CSINodePatch.py +1 -3
- pulumi_kubernetes/storage/v1beta1/CSIStorageCapacity.py +1 -3
- pulumi_kubernetes/storage/v1beta1/CSIStorageCapacityList.py +1 -3
- pulumi_kubernetes/storage/v1beta1/CSIStorageCapacityPatch.py +1 -3
- pulumi_kubernetes/storage/v1beta1/StorageClass.py +1 -3
- pulumi_kubernetes/storage/v1beta1/StorageClassList.py +1 -3
- pulumi_kubernetes/storage/v1beta1/StorageClassPatch.py +1 -3
- pulumi_kubernetes/storage/v1beta1/VolumeAttachment.py +1 -3
- pulumi_kubernetes/storage/v1beta1/VolumeAttachmentList.py +1 -3
- pulumi_kubernetes/storage/v1beta1/VolumeAttachmentPatch.py +1 -3
- pulumi_kubernetes/storage/v1beta1/VolumeAttributesClass.py +1 -3
- pulumi_kubernetes/storage/v1beta1/VolumeAttributesClassList.py +1 -3
- pulumi_kubernetes/storage/v1beta1/VolumeAttributesClassPatch.py +1 -3
- pulumi_kubernetes/storagemigration/v1alpha1/StorageVersionMigration.py +1 -3
- pulumi_kubernetes/storagemigration/v1alpha1/StorageVersionMigrationList.py +1 -3
- pulumi_kubernetes/storagemigration/v1alpha1/StorageVersionMigrationPatch.py +1 -3
- pulumi_kubernetes/yaml/v2/ConfigFile.py +1 -3
- pulumi_kubernetes/yaml/v2/ConfigGroup.py +1 -3
- pulumi_kubernetes/yaml/yaml.py +108 -0
- {pulumi_kubernetes-4.23.0a1746131759.dist-info → pulumi_kubernetes-4.23.0a1746153578.dist-info}/METADATA +2 -2
- pulumi_kubernetes-4.23.0a1746153578.dist-info/RECORD +709 -0
- pulumi_kubernetes-4.23.0a1746131759.dist-info/RECORD +0 -679
- {pulumi_kubernetes-4.23.0a1746131759.dist-info → pulumi_kubernetes-4.23.0a1746153578.dist-info}/WHEEL +0 -0
- {pulumi_kubernetes-4.23.0a1746131759.dist-info → pulumi_kubernetes-4.23.0a1746153578.dist-info}/top_level.txt +0 -0
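Beyond the docstring churn, the list shows entire new generated modules in this release: `certificates/v1beta1` ClusterTrustBundle, `coordination/v1beta1` LeaseCandidate, `networking/v1` IPAddress and ServiceCIDR, and the DRA `resource/v1beta2` group. A minimal sketch of consuming one of the new modules follows; the resource name and the empty spec are illustrative, and the Args class shapes are assumed from the usual codegen pattern, so check them against the generated `_inputs.py`:

```python
import pulumi_kubernetes as k8s

# Illustrative use of the newly generated resource.v1beta2 module (DRA).
# In the Kubernetes v1beta2 API, DeviceClassSpec's fields (selectors,
# config) are optional, so an empty spec is the smallest valid example.
device_class = k8s.resource.v1beta2.DeviceClass(
    "example-device-class",  # hypothetical Pulumi resource name
    metadata=k8s.meta.v1.ObjectMetaArgs(name="example-gpu-class"),
    spec=k8s.resource.v1beta2.DeviceClassSpecArgs(),
)
```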
pulumi_kubernetes/autoscaling/v2/outputs.py:

```diff
@@ -637,7 +637,11 @@ class HPAScalingPolicyPatch(dict):
 @pulumi.output_type
 class HPAScalingRules(dict):
     """
-    HPAScalingRules configures the scaling behavior for one direction
+    HPAScalingRules configures the scaling behavior for one direction via scaling Policy Rules and a configurable metric tolerance.
+
+    Scaling Policy Rules are applied after calculating DesiredReplicas from metrics for the HPA. They can limit the scaling velocity by specifying scaling policies. They can prevent flapping by specifying the stabilization window, so that the number of replicas is not set instantly, instead, the safest value from the stabilization window is chosen.
+
+    The tolerance is applied to the metric values and prevents scaling too eagerly for small metric variations. (Note that setting a tolerance requires enabling the alpha HPAConfigurableTolerance feature gate.)
     """
     @staticmethod
     def __key_warning(key: str):
@@ -661,12 +665,22 @@ class HPAScalingRules(dict):
     def __init__(__self__, *,
                  policies: Optional[Sequence['outputs.HPAScalingPolicy']] = None,
                  select_policy: Optional[builtins.str] = None,
-                 stabilization_window_seconds: Optional[builtins.int] = None):
+                 stabilization_window_seconds: Optional[builtins.int] = None,
+                 tolerance: Optional[builtins.str] = None):
         """
-        HPAScalingRules configures the scaling behavior for one direction
-        :param Sequence['HPAScalingPolicyArgs'] policies: policies is a list of potential scaling polices which can be used during scaling.
+        HPAScalingRules configures the scaling behavior for one direction via scaling Policy Rules and a configurable metric tolerance.
+
+        Scaling Policy Rules are applied after calculating DesiredReplicas from metrics for the HPA. They can limit the scaling velocity by specifying scaling policies. They can prevent flapping by specifying the stabilization window, so that the number of replicas is not set instantly, instead, the safest value from the stabilization window is chosen.
+
+        The tolerance is applied to the metric values and prevents scaling too eagerly for small metric variations. (Note that setting a tolerance requires enabling the alpha HPAConfigurableTolerance feature gate.)
+        :param Sequence['HPAScalingPolicyArgs'] policies: policies is a list of potential scaling polices which can be used during scaling. If not set, use the default values: - For scale up: allow doubling the number of pods, or an absolute change of 4 pods in a 15s window. - For scale down: allow all pods to be removed in a 15s window.
         :param builtins.str select_policy: selectPolicy is used to specify which policy should be used. If not set, the default value Max is used.
         :param builtins.int stabilization_window_seconds: stabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long).
+        :param builtins.str tolerance: tolerance is the tolerance on the ratio between the current and desired metric value under which no updates are made to the desired number of replicas (e.g. 0.01 for 1%). Must be greater than or equal to zero. If not set, the default cluster-wide tolerance is applied (by default 10%).
+
+        For example, if autoscaling is configured with a memory consumption target of 100Mi, and scale-down and scale-up tolerances of 5% and 1% respectively, scaling will be triggered when the actual consumption falls below 95Mi or exceeds 101Mi.
+
+        This is an alpha field and requires enabling the HPAConfigurableTolerance feature gate.
         """
         if policies is not None:
             pulumi.set(__self__, "policies", policies)
@@ -674,12 +688,14 @@ class HPAScalingRules(dict):
         pulumi.set(__self__, "select_policy", select_policy)
         if stabilization_window_seconds is not None:
             pulumi.set(__self__, "stabilization_window_seconds", stabilization_window_seconds)
+        if tolerance is not None:
+            pulumi.set(__self__, "tolerance", tolerance)

     @property
     @pulumi.getter
     def policies(self) -> Optional[Sequence['outputs.HPAScalingPolicy']]:
         """
-        policies is a list of potential scaling polices which can be used during scaling.
+        policies is a list of potential scaling polices which can be used during scaling. If not set, use the default values: - For scale up: allow doubling the number of pods, or an absolute change of 4 pods in a 15s window. - For scale down: allow all pods to be removed in a 15s window.
         """
         return pulumi.get(self, "policies")

@@ -699,11 +715,27 @@ class HPAScalingRules(dict):
         """
         return pulumi.get(self, "stabilization_window_seconds")

+    @property
+    @pulumi.getter
+    def tolerance(self) -> Optional[builtins.str]:
+        """
+        tolerance is the tolerance on the ratio between the current and desired metric value under which no updates are made to the desired number of replicas (e.g. 0.01 for 1%). Must be greater than or equal to zero. If not set, the default cluster-wide tolerance is applied (by default 10%).
+
+        For example, if autoscaling is configured with a memory consumption target of 100Mi, and scale-down and scale-up tolerances of 5% and 1% respectively, scaling will be triggered when the actual consumption falls below 95Mi or exceeds 101Mi.
+
+        This is an alpha field and requires enabling the HPAConfigurableTolerance feature gate.
+        """
+        return pulumi.get(self, "tolerance")
+

 @pulumi.output_type
 class HPAScalingRulesPatch(dict):
     """
-    HPAScalingRules configures the scaling behavior for one direction
+    HPAScalingRules configures the scaling behavior for one direction via scaling Policy Rules and a configurable metric tolerance.
+
+    Scaling Policy Rules are applied after calculating DesiredReplicas from metrics for the HPA. They can limit the scaling velocity by specifying scaling policies. They can prevent flapping by specifying the stabilization window, so that the number of replicas is not set instantly, instead, the safest value from the stabilization window is chosen.
+
+    The tolerance is applied to the metric values and prevents scaling too eagerly for small metric variations. (Note that setting a tolerance requires enabling the alpha HPAConfigurableTolerance feature gate.)
     """
     @staticmethod
     def __key_warning(key: str):
@@ -727,12 +759,22 @@ class HPAScalingRulesPatch(dict):
     def __init__(__self__, *,
                  policies: Optional[Sequence['outputs.HPAScalingPolicyPatch']] = None,
                  select_policy: Optional[builtins.str] = None,
-                 stabilization_window_seconds: Optional[builtins.int] = None):
+                 stabilization_window_seconds: Optional[builtins.int] = None,
+                 tolerance: Optional[builtins.str] = None):
         """
-        HPAScalingRules configures the scaling behavior for one direction
-        :param Sequence['HPAScalingPolicyPatchArgs'] policies: policies is a list of potential scaling polices which can be used during scaling.
+        HPAScalingRules configures the scaling behavior for one direction via scaling Policy Rules and a configurable metric tolerance.
+
+        Scaling Policy Rules are applied after calculating DesiredReplicas from metrics for the HPA. They can limit the scaling velocity by specifying scaling policies. They can prevent flapping by specifying the stabilization window, so that the number of replicas is not set instantly, instead, the safest value from the stabilization window is chosen.
+
+        The tolerance is applied to the metric values and prevents scaling too eagerly for small metric variations. (Note that setting a tolerance requires enabling the alpha HPAConfigurableTolerance feature gate.)
+        :param Sequence['HPAScalingPolicyPatchArgs'] policies: policies is a list of potential scaling polices which can be used during scaling. If not set, use the default values: - For scale up: allow doubling the number of pods, or an absolute change of 4 pods in a 15s window. - For scale down: allow all pods to be removed in a 15s window.
         :param builtins.str select_policy: selectPolicy is used to specify which policy should be used. If not set, the default value Max is used.
         :param builtins.int stabilization_window_seconds: stabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long).
+        :param builtins.str tolerance: tolerance is the tolerance on the ratio between the current and desired metric value under which no updates are made to the desired number of replicas (e.g. 0.01 for 1%). Must be greater than or equal to zero. If not set, the default cluster-wide tolerance is applied (by default 10%).
+
+        For example, if autoscaling is configured with a memory consumption target of 100Mi, and scale-down and scale-up tolerances of 5% and 1% respectively, scaling will be triggered when the actual consumption falls below 95Mi or exceeds 101Mi.
+
+        This is an alpha field and requires enabling the HPAConfigurableTolerance feature gate.
         """
         if policies is not None:
             pulumi.set(__self__, "policies", policies)
@@ -740,12 +782,14 @@ class HPAScalingRulesPatch(dict):
         pulumi.set(__self__, "select_policy", select_policy)
         if stabilization_window_seconds is not None:
             pulumi.set(__self__, "stabilization_window_seconds", stabilization_window_seconds)
+        if tolerance is not None:
+            pulumi.set(__self__, "tolerance", tolerance)

     @property
     @pulumi.getter
     def policies(self) -> Optional[Sequence['outputs.HPAScalingPolicyPatch']]:
         """
-        policies is a list of potential scaling polices which can be used during scaling.
+        policies is a list of potential scaling polices which can be used during scaling. If not set, use the default values: - For scale up: allow doubling the number of pods, or an absolute change of 4 pods in a 15s window. - For scale down: allow all pods to be removed in a 15s window.
         """
         return pulumi.get(self, "policies")

@@ -765,6 +809,18 @@ class HPAScalingRulesPatch(dict):
         """
         return pulumi.get(self, "stabilization_window_seconds")

+    @property
+    @pulumi.getter
+    def tolerance(self) -> Optional[builtins.str]:
+        """
+        tolerance is the tolerance on the ratio between the current and desired metric value under which no updates are made to the desired number of replicas (e.g. 0.01 for 1%). Must be greater than or equal to zero. If not set, the default cluster-wide tolerance is applied (by default 10%).
+
+        For example, if autoscaling is configured with a memory consumption target of 100Mi, and scale-down and scale-up tolerances of 5% and 1% respectively, scaling will be triggered when the actual consumption falls below 95Mi or exceeds 101Mi.
+
+        This is an alpha field and requires enabling the HPAConfigurableTolerance feature gate.
+        """
+        return pulumi.get(self, "tolerance")
+

 @pulumi.output_type
 class HorizontalPodAutoscaler(dict):
```
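The hunks above add the new `tolerance` field (a quantity string) to both the output and patch shapes; the matching input-side change is the `autoscaling/v2/_inputs.py` entry in the file list. A minimal sketch of setting it from a Pulumi program, assuming a cluster whose control plane runs with the alpha HPAConfigurableTolerance feature gate enabled; the Deployment name and the numbers are illustrative:

```python
import pulumi_kubernetes as k8s

# Illustrative HPA targeting a hypothetical Deployment named "web".
# `tolerance` is a quantity string; "0.05" means metric deviations under
# 5% leave the replica count untouched when scaling down.
hpa = k8s.autoscaling.v2.HorizontalPodAutoscaler(
    "web-hpa",
    spec=k8s.autoscaling.v2.HorizontalPodAutoscalerSpecArgs(
        scale_target_ref=k8s.autoscaling.v2.CrossVersionObjectReferenceArgs(
            api_version="apps/v1",
            kind="Deployment",
            name="web",
        ),
        min_replicas=2,
        max_replicas=10,
        metrics=[k8s.autoscaling.v2.MetricSpecArgs(
            type="Resource",
            resource=k8s.autoscaling.v2.ResourceMetricSourceArgs(
                name="cpu",
                target=k8s.autoscaling.v2.MetricTargetArgs(
                    type="Utilization",
                    average_utilization=70,
                ),
            ),
        )],
        behavior=k8s.autoscaling.v2.HorizontalPodAutoscalerBehaviorArgs(
            scale_down=k8s.autoscaling.v2.HPAScalingRulesArgs(
                stabilization_window_seconds=300,
                tolerance="0.05",  # new field in this release (alpha gate)
            ),
        ),
    ),
)
```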
pulumi_kubernetes/autoscaling/v2beta1/HorizontalPodAutoscaler.py:

```diff
@@ -92,10 +92,8 @@ class HorizontalPodAutoscalerInitArgs:
         pulumi.set(self, "spec", value)


+@pulumi.type_token("kubernetes:autoscaling/v2beta1:HorizontalPodAutoscaler")
 class HorizontalPodAutoscaler(pulumi.CustomResource):
-
-    pulumi_type = "kubernetes:autoscaling/v2beta1:HorizontalPodAutoscaler"
-
     @overload
     def __init__(__self__,
                  resource_name: str,
```

pulumi_kubernetes/autoscaling/v2beta1/HorizontalPodAutoscalerList.py:

```diff
@@ -91,10 +91,8 @@ class HorizontalPodAutoscalerListArgs:
         pulumi.set(self, "metadata", value)


+@pulumi.type_token("kubernetes:autoscaling/v2beta1:HorizontalPodAutoscalerList")
 class HorizontalPodAutoscalerList(pulumi.CustomResource):
-
-    pulumi_type = "kubernetes:autoscaling/v2beta1:HorizontalPodAutoscalerList"
-
     @overload
     def __init__(__self__,
                  resource_name: str,
```

pulumi_kubernetes/autoscaling/v2beta1/HorizontalPodAutoscalerPatch.py:

```diff
@@ -92,10 +92,8 @@ class HorizontalPodAutoscalerPatchArgs:
         pulumi.set(self, "spec", value)


+@pulumi.type_token("kubernetes:autoscaling/v2beta1:HorizontalPodAutoscalerPatch")
 class HorizontalPodAutoscalerPatch(pulumi.CustomResource):
-
-    pulumi_type = "kubernetes:autoscaling/v2beta1:HorizontalPodAutoscalerPatch"
-
     @overload
     def __init__(__self__,
                  resource_name: str,
```

pulumi_kubernetes/autoscaling/v2beta2/HorizontalPodAutoscaler.py:

```diff
@@ -92,10 +92,8 @@ class HorizontalPodAutoscalerInitArgs:
         pulumi.set(self, "spec", value)


+@pulumi.type_token("kubernetes:autoscaling/v2beta2:HorizontalPodAutoscaler")
 class HorizontalPodAutoscaler(pulumi.CustomResource):
-
-    pulumi_type = "kubernetes:autoscaling/v2beta2:HorizontalPodAutoscaler"
-
     @overload
     def __init__(__self__,
                  resource_name: str,
```

pulumi_kubernetes/autoscaling/v2beta2/HorizontalPodAutoscalerList.py:

```diff
@@ -91,10 +91,8 @@ class HorizontalPodAutoscalerListArgs:
         pulumi.set(self, "metadata", value)


+@pulumi.type_token("kubernetes:autoscaling/v2beta2:HorizontalPodAutoscalerList")
 class HorizontalPodAutoscalerList(pulumi.CustomResource):
-
-    pulumi_type = "kubernetes:autoscaling/v2beta2:HorizontalPodAutoscalerList"
-
     @overload
     def __init__(__self__,
                  resource_name: str,
```

pulumi_kubernetes/autoscaling/v2beta2/HorizontalPodAutoscalerPatch.py:

```diff
@@ -92,10 +92,8 @@ class HorizontalPodAutoscalerPatchArgs:
         pulumi.set(self, "spec", value)


+@pulumi.type_token("kubernetes:autoscaling/v2beta2:HorizontalPodAutoscalerPatch")
 class HorizontalPodAutoscalerPatch(pulumi.CustomResource):
-
-    pulumi_type = "kubernetes:autoscaling/v2beta2:HorizontalPodAutoscalerPatch"
-
     @overload
     def __init__(__self__,
                  resource_name: str,
```

pulumi_kubernetes/batch/v1/CronJob.py:

```diff
@@ -93,10 +93,8 @@ class CronJobInitArgs:
         pulumi.set(self, "spec", value)


+@pulumi.type_token("kubernetes:batch/v1:CronJob")
 class CronJob(pulumi.CustomResource):
-
-    pulumi_type = "kubernetes:batch/v1:CronJob"
-
     @overload
     def __init__(__self__,
                  resource_name: str,
```

pulumi_kubernetes/batch/v1/CronJobList.py:

```diff
@@ -92,10 +92,8 @@ class CronJobListArgs:
         pulumi.set(self, "metadata", value)


+@pulumi.type_token("kubernetes:batch/v1:CronJobList")
 class CronJobList(pulumi.CustomResource):
-
-    pulumi_type = "kubernetes:batch/v1:CronJobList"
-
     @overload
     def __init__(__self__,
                  resource_name: str,
```

pulumi_kubernetes/batch/v1/CronJobPatch.py:

```diff
@@ -93,10 +93,8 @@ class CronJobPatchArgs:
         pulumi.set(self, "spec", value)


+@pulumi.type_token("kubernetes:batch/v1:CronJobPatch")
 class CronJobPatch(pulumi.CustomResource):
-
-    pulumi_type = "kubernetes:batch/v1:CronJobPatch"
-
     @overload
     def __init__(__self__,
                  resource_name: str,
```

pulumi_kubernetes/batch/v1/Job.py:

```diff
@@ -93,10 +93,8 @@ class JobInitArgs:
         pulumi.set(self, "spec", value)


+@pulumi.type_token("kubernetes:batch/v1:Job")
 class Job(pulumi.CustomResource):
-
-    pulumi_type = "kubernetes:batch/v1:Job"
-
     @overload
     def __init__(__self__,
                  resource_name: str,
```

pulumi_kubernetes/batch/v1/JobList.py:

```diff
@@ -92,10 +92,8 @@ class JobListArgs:
         pulumi.set(self, "metadata", value)


+@pulumi.type_token("kubernetes:batch/v1:JobList")
 class JobList(pulumi.CustomResource):
-
-    pulumi_type = "kubernetes:batch/v1:JobList"
-
     @overload
     def __init__(__self__,
                  resource_name: str,
```

pulumi_kubernetes/batch/v1/JobPatch.py:

```diff
@@ -93,10 +93,8 @@ class JobPatchArgs:
         pulumi.set(self, "spec", value)


+@pulumi.type_token("kubernetes:batch/v1:JobPatch")
 class JobPatch(pulumi.CustomResource):
-
-    pulumi_type = "kubernetes:batch/v1:JobPatch"
-
     @overload
     def __init__(__self__,
                  resource_name: str,
```
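The mechanical change repeated across these files swaps the hand-written `pulumi_type` class attribute for the SDK's `@pulumi.type_token(...)` decorator. The real implementation lives in the pulumi SDK; the following is a hypothetical re-implementation of a decorator of this shape, only to show the effect:

```python
# Hypothetical sketch, not the pulumi SDK source: a decorator of this shape
# attaches the schema type token to the resource class, replacing the
# `pulumi_type = "..."` attribute each generated class used to declare inline.
def type_token(token: str):
    def decorate(cls):
        cls.pulumi_type = token  # same attribute the removed lines assigned
        return cls
    return decorate

@type_token("kubernetes:batch/v1:Job")
class Job:  # stand-in for a pulumi.CustomResource subclass
    pass

assert Job.pulumi_type == "kubernetes:batch/v1:Job"
```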
@@ -773,7 +773,7 @@ if not MYPY:
|
|
|
773
773
|
"""
|
|
774
774
|
backoff_limit_per_index: NotRequired[pulumi.Input[builtins.int]]
|
|
775
775
|
"""
|
|
776
|
-
Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable.
|
|
776
|
+
Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable.
|
|
777
777
|
"""
|
|
778
778
|
completion_mode: NotRequired[pulumi.Input[builtins.str]]
|
|
779
779
|
"""
|
|
@@ -801,7 +801,7 @@ if not MYPY:
|
|
|
801
801
|
"""
|
|
802
802
|
max_failed_indexes: NotRequired[pulumi.Input[builtins.int]]
|
|
803
803
|
"""
|
|
804
|
-
Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5.
|
|
804
|
+
Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5.
|
|
805
805
|
"""
|
|
806
806
|
parallelism: NotRequired[pulumi.Input[builtins.int]]
|
|
807
807
|
"""
|
|
@@ -827,8 +827,6 @@ if not MYPY:
|
|
|
827
827
|
success_policy: NotRequired[pulumi.Input['SuccessPolicyPatchArgsDict']]
|
|
828
828
|
"""
|
|
829
829
|
successPolicy specifies the policy when the Job can be declared as succeeded. If empty, the default behavior applies - the Job is declared as succeeded only when the number of succeeded pods equals to the completions. When the field is specified, it must be immutable and works only for the Indexed Jobs. Once the Job meets the SuccessPolicy, the lingering pods are terminated.
|
|
830
|
-
|
|
831
|
-
This field is beta-level. To use this field, you must enable the `JobSuccessPolicy` feature gate (enabled by default).
|
|
832
830
|
"""
|
|
833
831
|
suspend: NotRequired[pulumi.Input[builtins.bool]]
|
|
834
832
|
"""
|
|
@@ -868,7 +866,7 @@ class JobSpecPatchArgs:
|
|
|
868
866
|
JobSpec describes how the job execution will look like.
|
|
869
867
|
:param pulumi.Input[builtins.int] active_deadline_seconds: Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again.
|
|
870
868
|
:param pulumi.Input[builtins.int] backoff_limit: Specifies the number of retries before marking this job failed. Defaults to 6
|
|
871
|
-
:param pulumi.Input[builtins.int] backoff_limit_per_index: Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable.
|
|
869
|
+
:param pulumi.Input[builtins.int] backoff_limit_per_index: Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable.
|
|
872
870
|
:param pulumi.Input[builtins.str] completion_mode: completionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.
|
|
873
871
|
|
|
874
872
|
`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.
|
|
@@ -881,7 +879,7 @@ class JobSpecPatchArgs:

         This field is beta-level. The job controller accepts setting the field when the feature gate JobManagedBy is enabled (enabled by default).
         :param pulumi.Input[builtins.bool] manual_selector: manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector
-        :param pulumi.Input[builtins.int] max_failed_indexes: Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5.
+        :param pulumi.Input[builtins.int] max_failed_indexes: Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5.
         :param pulumi.Input[builtins.int] parallelism: Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/
         :param pulumi.Input['PodFailurePolicyPatchArgs'] pod_failure_policy: Specifies the policy of handling failed pods. In particular, it allows to specify the set of actions and conditions which need to be satisfied to take the associated action. If empty, the default behaviour applies - the counter of failed pods, represented by the jobs's .status.failed field, is incremented and it is checked against the backoffLimit. This field cannot be used in combination with restartPolicy=OnFailure.
         :param pulumi.Input[builtins.str] pod_replacement_policy: podReplacementPolicy specifies when to create replacement Pods. Possible values are: - TerminatingOrFailed means that we recreate pods
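The `podReplacementPolicy` docstring above continues in the next hunk; together with `podFailurePolicy` it is easiest to read as code. A hedged sketch using the generated input types (the exit code, names, and image are assumptions):

```python
import pulumi_kubernetes as k8s

# Sketch: recreate pods only once they are fully Failed, and fail the
# whole Job immediately on a non-retriable exit code. Per the docstring,
# "Failed" is the only allowed podReplacementPolicy with podFailurePolicy.
job = k8s.batch.v1.Job(
    "retry-on-clean-failure",
    spec=k8s.batch.v1.JobSpecArgs(
        backoff_limit=3,
        pod_replacement_policy="Failed",
        pod_failure_policy=k8s.batch.v1.PodFailurePolicyArgs(
            rules=[
                k8s.batch.v1.PodFailurePolicyRuleArgs(
                    action="FailJob",
                    on_exit_codes=k8s.batch.v1.PodFailurePolicyOnExitCodesRequirementArgs(
                        operator="In",
                        values=[42],  # assumed non-retriable exit code
                    ),
                ),
            ],
        ),
        template=k8s.core.v1.PodTemplateSpecArgs(
            spec=k8s.core.v1.PodSpecArgs(
                restart_policy="Never",  # podFailurePolicy requires Never
                containers=[k8s.core.v1.ContainerArgs(name="main", image="busybox")],
            ),
        ),
    ),
)
```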
@@ -892,8 +890,6 @@ class JobSpecPatchArgs:
         When using podFailurePolicy, Failed is the the only allowed value. TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. This is an beta field. To use this, enable the JobPodReplacementPolicy feature toggle. This is on by default.
         :param pulumi.Input['_meta.v1.LabelSelectorPatchArgs'] selector: A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
         :param pulumi.Input['SuccessPolicyPatchArgs'] success_policy: successPolicy specifies the policy when the Job can be declared as succeeded. If empty, the default behavior applies - the Job is declared as succeeded only when the number of succeeded pods equals to the completions. When the field is specified, it must be immutable and works only for the Indexed Jobs. Once the Job meets the SuccessPolicy, the lingering pods are terminated.
-
-        This field is beta-level. To use this field, you must enable the `JobSuccessPolicy` feature gate (enabled by default).
         :param pulumi.Input[builtins.bool] suspend: suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false.
         :param pulumi.Input['_core.v1.PodTemplateSpecPatchArgs'] template: Describes the pod that will be created when executing a job. The only allowed template.spec.restartPolicy values are "Never" or "OnFailure". More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/
         :param pulumi.Input[builtins.int] ttl_seconds_after_finished: ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes.
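The `backoffLimitPerIndex` and `maxFailedIndexes` parameters above work together; a minimal sketch of the pair (counts, names, and image are illustrative assumptions):

```python
import pulumi_kubernetes as k8s

# Sketch: a per-index retry budget for an Indexed Job, per the
# backoffLimitPerIndex / maxFailedIndexes docstrings above.
job = k8s.batch.v1.Job(
    "per-index-retries",
    spec=k8s.batch.v1.JobSpecArgs(
        completion_mode="Indexed",   # required for backoffLimitPerIndex
        completions=100,
        parallelism=10,
        backoff_limit_per_index=2,   # each index may fail twice before it is marked failed
        max_failed_indexes=5,        # the whole Job fails once more than 5 indexes fail
        template=k8s.core.v1.PodTemplateSpecArgs(
            spec=k8s.core.v1.PodSpecArgs(
                restart_policy="Never",  # also required for backoffLimitPerIndex
                containers=[k8s.core.v1.ContainerArgs(name="worker", image="busybox")],
            ),
        ),
    ),
)
```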
@@ -959,7 +955,7 @@ class JobSpecPatchArgs:
     @pulumi.getter(name="backoffLimitPerIndex")
     def backoff_limit_per_index(self) -> Optional[pulumi.Input[builtins.int]]:
         """
-        Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable.
+        Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable.
         """
         return pulumi.get(self, "backoff_limit_per_index")

@@ -1027,7 +1023,7 @@ class JobSpecPatchArgs:
     @pulumi.getter(name="maxFailedIndexes")
     def max_failed_indexes(self) -> Optional[pulumi.Input[builtins.int]]:
         """
-        Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5.
+        Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5.
         """
         return pulumi.get(self, "max_failed_indexes")

@@ -1093,8 +1089,6 @@ class JobSpecPatchArgs:
     def success_policy(self) -> Optional[pulumi.Input['SuccessPolicyPatchArgs']]:
         """
         successPolicy specifies the policy when the Job can be declared as succeeded. If empty, the default behavior applies - the Job is declared as succeeded only when the number of succeeded pods equals to the completions. When the field is specified, it must be immutable and works only for the Indexed Jobs. Once the Job meets the SuccessPolicy, the lingering pods are terminated.
-
-        This field is beta-level. To use this field, you must enable the `JobSuccessPolicy` feature gate (enabled by default).
         """
         return pulumi.get(self, "success_policy")

@@ -1158,7 +1152,7 @@ if not MYPY:
         """
         backoff_limit_per_index: NotRequired[pulumi.Input[builtins.int]]
         """
-        Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable.
+        Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable.
         """
         completion_mode: NotRequired[pulumi.Input[builtins.str]]
         """
@@ -1186,7 +1180,7 @@ if not MYPY:
         """
         max_failed_indexes: NotRequired[pulumi.Input[builtins.int]]
         """
-        Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5.
+        Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5.
         """
         parallelism: NotRequired[pulumi.Input[builtins.int]]
         """
@@ -1212,8 +1206,6 @@ if not MYPY:
         success_policy: NotRequired[pulumi.Input['SuccessPolicyArgsDict']]
         """
         successPolicy specifies the policy when the Job can be declared as succeeded. If empty, the default behavior applies - the Job is declared as succeeded only when the number of succeeded pods equals to the completions. When the field is specified, it must be immutable and works only for the Indexed Jobs. Once the Job meets the SuccessPolicy, the lingering pods are terminated.
-
-        This field is beta-level. To use this field, you must enable the `JobSuccessPolicy` feature gate (enabled by default).
         """
         suspend: NotRequired[pulumi.Input[builtins.bool]]
         """
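These `ArgsDict` TypedDicts exist so that plain dictionaries type-check as inputs; the `*Args` classes and dict literals with snake_case keys should be interchangeable. A sketch under that assumption (all field values illustrative):

```python
import pulumi_kubernetes as k8s

# Sketch: the same JobSpec expressed as a plain dict matching the
# JobSpecArgsDict shape shown above, instead of the *Args classes.
job = k8s.batch.v1.Job(
    "dict-style-job",
    spec={
        "completion_mode": "Indexed",
        "completions": 3,
        "success_policy": {"rules": [{"succeeded_count": 1}]},
        "template": {
            "spec": {
                "restart_policy": "Never",
                "containers": [{"name": "main", "image": "busybox"}],
            },
        },
    },
)
```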
@@ -1250,7 +1242,7 @@ class JobSpecArgs:
         :param pulumi.Input['_core.v1.PodTemplateSpecArgs'] template: Describes the pod that will be created when executing a job. The only allowed template.spec.restartPolicy values are "Never" or "OnFailure". More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/
         :param pulumi.Input[builtins.int] active_deadline_seconds: Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again.
         :param pulumi.Input[builtins.int] backoff_limit: Specifies the number of retries before marking this job failed. Defaults to 6
-        :param pulumi.Input[builtins.int] backoff_limit_per_index: Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable.
+        :param pulumi.Input[builtins.int] backoff_limit_per_index: Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable.
         :param pulumi.Input[builtins.str] completion_mode: completionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.

         `NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.
@@ -1263,7 +1255,7 @@ class JobSpecArgs:

         This field is beta-level. The job controller accepts setting the field when the feature gate JobManagedBy is enabled (enabled by default).
         :param pulumi.Input[builtins.bool] manual_selector: manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector
-        :param pulumi.Input[builtins.int] max_failed_indexes: Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5.
+        :param pulumi.Input[builtins.int] max_failed_indexes: Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5.
         :param pulumi.Input[builtins.int] parallelism: Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/
         :param pulumi.Input['PodFailurePolicyArgs'] pod_failure_policy: Specifies the policy of handling failed pods. In particular, it allows to specify the set of actions and conditions which need to be satisfied to take the associated action. If empty, the default behaviour applies - the counter of failed pods, represented by the jobs's .status.failed field, is incremented and it is checked against the backoffLimit. This field cannot be used in combination with restartPolicy=OnFailure.
         :param pulumi.Input[builtins.str] pod_replacement_policy: podReplacementPolicy specifies when to create replacement Pods. Possible values are: - TerminatingOrFailed means that we recreate pods
@@ -1274,8 +1266,6 @@ class JobSpecArgs:
         When using podFailurePolicy, Failed is the the only allowed value. TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. This is an beta field. To use this, enable the JobPodReplacementPolicy feature toggle. This is on by default.
         :param pulumi.Input['_meta.v1.LabelSelectorArgs'] selector: A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
         :param pulumi.Input['SuccessPolicyArgs'] success_policy: successPolicy specifies the policy when the Job can be declared as succeeded. If empty, the default behavior applies - the Job is declared as succeeded only when the number of succeeded pods equals to the completions. When the field is specified, it must be immutable and works only for the Indexed Jobs. Once the Job meets the SuccessPolicy, the lingering pods are terminated.
-
-        This field is beta-level. To use this field, you must enable the `JobSuccessPolicy` feature gate (enabled by default).
         :param pulumi.Input[builtins.bool] suspend: suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false.
         :param pulumi.Input['_core.v1.PodTemplateSpecArgs'] template: Describes the pod that will be created when executing a job. The only allowed template.spec.restartPolicy values are "Never" or "OnFailure". More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/
         :param pulumi.Input[builtins.int] ttl_seconds_after_finished: ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes.
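The `suspend` and `ttlSecondsAfterFinished` parameters above compose naturally; a sketch that gates Pod creation behind a stack config flag (the config key and all values are assumptions):

```python
import pulumi_kubernetes as k8s
from pulumi import Config

# Sketch: flipping the "suspendJob" config value from true to false
# resumes the Job and, per the docstring above, resets its startTime
# and activeDeadlineSeconds timer.
config = Config()
job = k8s.batch.v1.Job(
    "suspendable-job",
    spec=k8s.batch.v1.JobSpecArgs(
        suspend=config.get_bool("suspendJob") or False,
        ttl_seconds_after_finished=300,  # garbage-collect 5 minutes after the Job finishes
        template=k8s.core.v1.PodTemplateSpecArgs(
            spec=k8s.core.v1.PodSpecArgs(
                restart_policy="OnFailure",
                containers=[k8s.core.v1.ContainerArgs(name="main", image="busybox")],
            ),
        ),
    ),
)
```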
@@ -1351,7 +1341,7 @@ class JobSpecArgs:
     @pulumi.getter(name="backoffLimitPerIndex")
     def backoff_limit_per_index(self) -> Optional[pulumi.Input[builtins.int]]:
         """
-        Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable.
+        Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable.
         """
         return pulumi.get(self, "backoff_limit_per_index")

@@ -1419,7 +1409,7 @@
     @pulumi.getter(name="maxFailedIndexes")
     def max_failed_indexes(self) -> Optional[pulumi.Input[builtins.int]]:
         """
-        Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5.
+        Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5.
         """
         return pulumi.get(self, "max_failed_indexes")

@@ -1485,8 +1475,6 @@ class JobSpecArgs:
     def success_policy(self) -> Optional[pulumi.Input['SuccessPolicyArgs']]:
         """
         successPolicy specifies the policy when the Job can be declared as succeeded. If empty, the default behavior applies - the Job is declared as succeeded only when the number of succeeded pods equals to the completions. When the field is specified, it must be immutable and works only for the Indexed Jobs. Once the Job meets the SuccessPolicy, the lingering pods are terminated.
-
-        This field is beta-level. To use this field, you must enable the `JobSuccessPolicy` feature gate (enabled by default).
         """
         return pulumi.get(self, "success_policy")

@@ -1551,8 +1539,6 @@ if not MYPY:
         failed_indexes: NotRequired[pulumi.Input[builtins.str]]
         """
         FailedIndexes holds the failed indexes when spec.backoffLimitPerIndex is set. The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as "1,3-5,7". The set of failed indexes cannot overlap with the set of completed indexes.
-
-        This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).
         """
         ready: NotRequired[pulumi.Input[builtins.int]]
         """
@@ -1614,8 +1600,6 @@ class JobStatusArgs:
         More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/
         :param pulumi.Input[builtins.int] failed: The number of pods which reached phase Failed. The value increases monotonically.
         :param pulumi.Input[builtins.str] failed_indexes: FailedIndexes holds the failed indexes when spec.backoffLimitPerIndex is set. The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as "1,3-5,7". The set of failed indexes cannot overlap with the set of completed indexes.
-
-        This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).
         :param pulumi.Input[builtins.int] ready: The number of active pods which have a Ready condition and are not terminating (without a deletionTimestamp).
         :param pulumi.Input[builtins.str] start_time: Represents time when the job controller started processing a job. When a Job is created in the suspended state, this field is not set until the first time it is resumed. This field is reset every time a Job is resumed from suspension. It is represented in RFC3339 form and is in UTC.

@@ -1725,8 +1709,6 @@ class JobStatusArgs:
     def failed_indexes(self) -> Optional[pulumi.Input[builtins.str]]:
         """
         FailedIndexes holds the failed indexes when spec.backoffLimitPerIndex is set. The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as "1,3-5,7". The set of failed indexes cannot overlap with the set of completed indexes.
-
-        This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).
         """
         return pulumi.get(self, "failed_indexes")

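The compressed `failedIndexes` format described above ("1,3-5,7") expands mechanically. A small standalone helper, not part of this SDK, illustrating the encoding:

```python
def parse_failed_indexes(failed_indexes: str) -> set[int]:
    """Expand the compressed index format documented above,
    e.g. "1,3-5,7" -> {1, 3, 4, 5, 7}. A sketch, not an SDK function."""
    result: set[int] = set()
    if not failed_indexes:
        return result
    for part in failed_indexes.split(","):
        if "-" in part:
            # Three or more consecutive numbers are stored as "first-last".
            lo, hi = part.split("-")
            result.update(range(int(lo), int(hi) + 1))
        else:
            result.add(int(part))
    return result

assert parse_failed_indexes("1,3-5,7") == {1, 3, 4, 5, 7}
```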
@@ -2431,8 +2413,6 @@ if not MYPY:
           running pods are terminated.
         - FailIndex: indicates that the pod's index is marked as Failed and will
           not be restarted.
-          This value is beta-level. It can be used when the
-          `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).
         - Ignore: indicates that the counter towards the .backoffLimit is not
           incremented and a replacement pod is created.
         - Count: indicates that the pod is handled in the default way - the
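The `FailIndex` action documented above pairs with `backoffLimitPerIndex` on the JobSpec; a sketch of the rule on its own (the exit code is an assumption):

```python
import pulumi_kubernetes as k8s

# Sketch: a pod exiting with code 3 marks only its own index as failed
# instead of consuming the Job-level backoffLimit. The enclosing JobSpec
# must also set backoff_limit_per_index for FailIndex to apply.
fail_index_rule = k8s.batch.v1.PodFailurePolicyRuleArgs(
    action="FailIndex",
    on_exit_codes=k8s.batch.v1.PodFailurePolicyOnExitCodesRequirementArgs(
        operator="In",
        values=[3],  # assumed exit code meaning "this shard's input is bad"
    ),
)
```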
@@ -2464,8 +2444,6 @@ class PodFailurePolicyRulePatchArgs:
           running pods are terminated.
         - FailIndex: indicates that the pod's index is marked as Failed and will
           not be restarted.
-          This value is beta-level. It can be used when the
-          `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).
         - Ignore: indicates that the counter towards the .backoffLimit is not
           incremented and a replacement pod is created.
         - Count: indicates that the pod is handled in the default way - the
@@ -2491,8 +2469,6 @@ class PodFailurePolicyRulePatchArgs:
           running pods are terminated.
         - FailIndex: indicates that the pod's index is marked as Failed and will
           not be restarted.
-          This value is beta-level. It can be used when the
-          `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).
         - Ignore: indicates that the counter towards the .backoffLimit is not
           incremented and a replacement pod is created.
         - Count: indicates that the pod is handled in the default way - the
@@ -2543,8 +2519,6 @@ if not MYPY:
           running pods are terminated.
         - FailIndex: indicates that the pod's index is marked as Failed and will
           not be restarted.
-          This value is beta-level. It can be used when the
-          `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).
         - Ignore: indicates that the counter towards the .backoffLimit is not
           incremented and a replacement pod is created.
         - Count: indicates that the pod is handled in the default way - the
@@ -2576,8 +2550,6 @@ class PodFailurePolicyRuleArgs:
           running pods are terminated.
         - FailIndex: indicates that the pod's index is marked as Failed and will
           not be restarted.
-          This value is beta-level. It can be used when the
-          `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).
         - Ignore: indicates that the counter towards the .backoffLimit is not
           incremented and a replacement pod is created.
         - Count: indicates that the pod is handled in the default way - the
@@ -2602,8 +2574,6 @@ class PodFailurePolicyRuleArgs:
           running pods are terminated.
         - FailIndex: indicates that the pod's index is marked as Failed and will
           not be restarted.
-          This value is beta-level. It can be used when the
-          `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).
         - Ignore: indicates that the counter towards the .backoffLimit is not
           incremented and a replacement pod is created.
         - Count: indicates that the pod is handled in the default way - the