pulumi-kubernetes 4.23.0a1746131759-py3-none-any.whl → 4.23.0a1746153578-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of pulumi-kubernetes might be problematic.
- pulumi_kubernetes/__init__.py +36 -2
- pulumi_kubernetes/admissionregistration/v1/MutatingWebhookConfiguration.py +1 -3
- pulumi_kubernetes/admissionregistration/v1/MutatingWebhookConfigurationList.py +1 -3
- pulumi_kubernetes/admissionregistration/v1/MutatingWebhookConfigurationPatch.py +1 -3
- pulumi_kubernetes/admissionregistration/v1/ValidatingAdmissionPolicy.py +1 -3
- pulumi_kubernetes/admissionregistration/v1/ValidatingAdmissionPolicyBinding.py +1 -3
- pulumi_kubernetes/admissionregistration/v1/ValidatingAdmissionPolicyBindingList.py +1 -3
- pulumi_kubernetes/admissionregistration/v1/ValidatingAdmissionPolicyBindingPatch.py +1 -3
- pulumi_kubernetes/admissionregistration/v1/ValidatingAdmissionPolicyList.py +1 -3
- pulumi_kubernetes/admissionregistration/v1/ValidatingAdmissionPolicyPatch.py +1 -3
- pulumi_kubernetes/admissionregistration/v1/ValidatingWebhookConfiguration.py +1 -3
- pulumi_kubernetes/admissionregistration/v1/ValidatingWebhookConfigurationList.py +1 -3
- pulumi_kubernetes/admissionregistration/v1/ValidatingWebhookConfigurationPatch.py +1 -3
- pulumi_kubernetes/admissionregistration/v1alpha1/MutatingAdmissionPolicy.py +1 -3
- pulumi_kubernetes/admissionregistration/v1alpha1/MutatingAdmissionPolicyBinding.py +1 -3
- pulumi_kubernetes/admissionregistration/v1alpha1/MutatingAdmissionPolicyBindingList.py +1 -3
- pulumi_kubernetes/admissionregistration/v1alpha1/MutatingAdmissionPolicyBindingPatch.py +1 -3
- pulumi_kubernetes/admissionregistration/v1alpha1/MutatingAdmissionPolicyList.py +1 -3
- pulumi_kubernetes/admissionregistration/v1alpha1/MutatingAdmissionPolicyPatch.py +1 -3
- pulumi_kubernetes/admissionregistration/v1alpha1/ValidatingAdmissionPolicy.py +1 -3
- pulumi_kubernetes/admissionregistration/v1alpha1/ValidatingAdmissionPolicyBinding.py +1 -3
- pulumi_kubernetes/admissionregistration/v1alpha1/ValidatingAdmissionPolicyBindingList.py +1 -3
- pulumi_kubernetes/admissionregistration/v1alpha1/ValidatingAdmissionPolicyBindingPatch.py +1 -3
- pulumi_kubernetes/admissionregistration/v1alpha1/ValidatingAdmissionPolicyList.py +1 -3
- pulumi_kubernetes/admissionregistration/v1alpha1/ValidatingAdmissionPolicyPatch.py +1 -3
- pulumi_kubernetes/admissionregistration/v1alpha1/_inputs.py +30 -30
- pulumi_kubernetes/admissionregistration/v1alpha1/outputs.py +20 -20
- pulumi_kubernetes/admissionregistration/v1beta1/MutatingWebhookConfiguration.py +1 -3
- pulumi_kubernetes/admissionregistration/v1beta1/MutatingWebhookConfigurationList.py +1 -3
- pulumi_kubernetes/admissionregistration/v1beta1/MutatingWebhookConfigurationPatch.py +1 -3
- pulumi_kubernetes/admissionregistration/v1beta1/ValidatingAdmissionPolicy.py +1 -3
- pulumi_kubernetes/admissionregistration/v1beta1/ValidatingAdmissionPolicyBinding.py +1 -3
- pulumi_kubernetes/admissionregistration/v1beta1/ValidatingAdmissionPolicyBindingList.py +1 -3
- pulumi_kubernetes/admissionregistration/v1beta1/ValidatingAdmissionPolicyBindingPatch.py +1 -3
- pulumi_kubernetes/admissionregistration/v1beta1/ValidatingAdmissionPolicyList.py +1 -3
- pulumi_kubernetes/admissionregistration/v1beta1/ValidatingAdmissionPolicyPatch.py +1 -3
- pulumi_kubernetes/admissionregistration/v1beta1/ValidatingWebhookConfiguration.py +1 -3
- pulumi_kubernetes/admissionregistration/v1beta1/ValidatingWebhookConfigurationList.py +1 -3
- pulumi_kubernetes/admissionregistration/v1beta1/ValidatingWebhookConfigurationPatch.py +1 -3
- pulumi_kubernetes/apiextensions/v1/CustomResourceDefinition.py +1 -3
- pulumi_kubernetes/apiextensions/v1/CustomResourceDefinitionList.py +1 -3
- pulumi_kubernetes/apiextensions/v1/CustomResourceDefinitionPatch.py +1 -3
- pulumi_kubernetes/apiextensions/v1beta1/CustomResourceDefinition.py +1 -3
- pulumi_kubernetes/apiextensions/v1beta1/CustomResourceDefinitionList.py +1 -3
- pulumi_kubernetes/apiextensions/v1beta1/CustomResourceDefinitionPatch.py +1 -3
- pulumi_kubernetes/apiregistration/v1/APIService.py +1 -3
- pulumi_kubernetes/apiregistration/v1/APIServiceList.py +1 -3
- pulumi_kubernetes/apiregistration/v1/APIServicePatch.py +1 -3
- pulumi_kubernetes/apiregistration/v1beta1/APIService.py +1 -3
- pulumi_kubernetes/apiregistration/v1beta1/APIServiceList.py +1 -3
- pulumi_kubernetes/apiregistration/v1beta1/APIServicePatch.py +1 -3
- pulumi_kubernetes/apps/v1/ControllerRevision.py +1 -3
- pulumi_kubernetes/apps/v1/ControllerRevisionList.py +1 -3
- pulumi_kubernetes/apps/v1/ControllerRevisionPatch.py +1 -3
- pulumi_kubernetes/apps/v1/DaemonSet.py +1 -3
- pulumi_kubernetes/apps/v1/DaemonSetList.py +1 -3
- pulumi_kubernetes/apps/v1/DaemonSetPatch.py +1 -3
- pulumi_kubernetes/apps/v1/Deployment.py +1 -3
- pulumi_kubernetes/apps/v1/DeploymentList.py +1 -3
- pulumi_kubernetes/apps/v1/DeploymentPatch.py +1 -3
- pulumi_kubernetes/apps/v1/ReplicaSet.py +1 -3
- pulumi_kubernetes/apps/v1/ReplicaSetList.py +5 -7
- pulumi_kubernetes/apps/v1/ReplicaSetPatch.py +1 -3
- pulumi_kubernetes/apps/v1/StatefulSet.py +1 -3
- pulumi_kubernetes/apps/v1/StatefulSetList.py +1 -3
- pulumi_kubernetes/apps/v1/StatefulSetPatch.py +1 -3
- pulumi_kubernetes/apps/v1/_inputs.py +109 -56
- pulumi_kubernetes/apps/v1/outputs.py +129 -56
- pulumi_kubernetes/apps/v1beta1/ControllerRevision.py +1 -3
- pulumi_kubernetes/apps/v1beta1/ControllerRevisionList.py +1 -3
- pulumi_kubernetes/apps/v1beta1/ControllerRevisionPatch.py +1 -3
- pulumi_kubernetes/apps/v1beta1/Deployment.py +1 -3
- pulumi_kubernetes/apps/v1beta1/DeploymentList.py +1 -3
- pulumi_kubernetes/apps/v1beta1/DeploymentPatch.py +1 -3
- pulumi_kubernetes/apps/v1beta1/StatefulSet.py +1 -3
- pulumi_kubernetes/apps/v1beta1/StatefulSetList.py +1 -3
- pulumi_kubernetes/apps/v1beta1/StatefulSetPatch.py +1 -3
- pulumi_kubernetes/apps/v1beta2/ControllerRevision.py +1 -3
- pulumi_kubernetes/apps/v1beta2/ControllerRevisionList.py +1 -3
- pulumi_kubernetes/apps/v1beta2/ControllerRevisionPatch.py +1 -3
- pulumi_kubernetes/apps/v1beta2/DaemonSet.py +1 -3
- pulumi_kubernetes/apps/v1beta2/DaemonSetList.py +1 -3
- pulumi_kubernetes/apps/v1beta2/DaemonSetPatch.py +1 -3
- pulumi_kubernetes/apps/v1beta2/Deployment.py +1 -3
- pulumi_kubernetes/apps/v1beta2/DeploymentList.py +1 -3
- pulumi_kubernetes/apps/v1beta2/DeploymentPatch.py +1 -3
- pulumi_kubernetes/apps/v1beta2/ReplicaSet.py +1 -3
- pulumi_kubernetes/apps/v1beta2/ReplicaSetList.py +1 -3
- pulumi_kubernetes/apps/v1beta2/ReplicaSetPatch.py +1 -3
- pulumi_kubernetes/apps/v1beta2/StatefulSet.py +1 -3
- pulumi_kubernetes/apps/v1beta2/StatefulSetList.py +1 -3
- pulumi_kubernetes/apps/v1beta2/StatefulSetPatch.py +1 -3
- pulumi_kubernetes/auditregistration/v1alpha1/AuditSink.py +1 -3
- pulumi_kubernetes/auditregistration/v1alpha1/AuditSinkList.py +1 -3
- pulumi_kubernetes/auditregistration/v1alpha1/AuditSinkPatch.py +1 -3
- pulumi_kubernetes/autoscaling/v1/HorizontalPodAutoscaler.py +1 -3
- pulumi_kubernetes/autoscaling/v1/HorizontalPodAutoscalerList.py +1 -3
- pulumi_kubernetes/autoscaling/v1/HorizontalPodAutoscalerPatch.py +1 -3
- pulumi_kubernetes/autoscaling/v2/HorizontalPodAutoscaler.py +1 -3
- pulumi_kubernetes/autoscaling/v2/HorizontalPodAutoscalerList.py +1 -3
- pulumi_kubernetes/autoscaling/v2/HorizontalPodAutoscalerPatch.py +1 -3
- pulumi_kubernetes/autoscaling/v2/_inputs.py +92 -12
- pulumi_kubernetes/autoscaling/v2/outputs.py +66 -10
- pulumi_kubernetes/autoscaling/v2beta1/HorizontalPodAutoscaler.py +1 -3
- pulumi_kubernetes/autoscaling/v2beta1/HorizontalPodAutoscalerList.py +1 -3
- pulumi_kubernetes/autoscaling/v2beta1/HorizontalPodAutoscalerPatch.py +1 -3
- pulumi_kubernetes/autoscaling/v2beta2/HorizontalPodAutoscaler.py +1 -3
- pulumi_kubernetes/autoscaling/v2beta2/HorizontalPodAutoscalerList.py +1 -3
- pulumi_kubernetes/autoscaling/v2beta2/HorizontalPodAutoscalerPatch.py +1 -3
- pulumi_kubernetes/batch/v1/CronJob.py +1 -3
- pulumi_kubernetes/batch/v1/CronJobList.py +1 -3
- pulumi_kubernetes/batch/v1/CronJobPatch.py +1 -3
- pulumi_kubernetes/batch/v1/Job.py +1 -3
- pulumi_kubernetes/batch/v1/JobList.py +1 -3
- pulumi_kubernetes/batch/v1/JobPatch.py +1 -3
- pulumi_kubernetes/batch/v1/_inputs.py +12 -42
- pulumi_kubernetes/batch/v1/outputs.py +8 -32
- pulumi_kubernetes/batch/v1beta1/CronJob.py +1 -3
- pulumi_kubernetes/batch/v1beta1/CronJobList.py +1 -3
- pulumi_kubernetes/batch/v1beta1/CronJobPatch.py +1 -3
- pulumi_kubernetes/batch/v2alpha1/CronJob.py +1 -3
- pulumi_kubernetes/batch/v2alpha1/CronJobList.py +1 -3
- pulumi_kubernetes/batch/v2alpha1/CronJobPatch.py +1 -3
- pulumi_kubernetes/certificates/v1/CertificateSigningRequest.py +1 -3
- pulumi_kubernetes/certificates/v1/CertificateSigningRequestList.py +1 -3
- pulumi_kubernetes/certificates/v1/CertificateSigningRequestPatch.py +1 -3
- pulumi_kubernetes/certificates/v1alpha1/ClusterTrustBundle.py +3 -3
- pulumi_kubernetes/certificates/v1alpha1/ClusterTrustBundleList.py +1 -3
- pulumi_kubernetes/certificates/v1alpha1/ClusterTrustBundlePatch.py +3 -3
- pulumi_kubernetes/certificates/v1beta1/CertificateSigningRequest.py +1 -3
- pulumi_kubernetes/certificates/v1beta1/CertificateSigningRequestList.py +1 -3
- pulumi_kubernetes/certificates/v1beta1/CertificateSigningRequestPatch.py +1 -3
- pulumi_kubernetes/certificates/v1beta1/ClusterTrustBundle.py +227 -0
- pulumi_kubernetes/certificates/v1beta1/ClusterTrustBundleList.py +217 -0
- pulumi_kubernetes/certificates/v1beta1/ClusterTrustBundlePatch.py +238 -0
- pulumi_kubernetes/certificates/v1beta1/__init__.py +3 -0
- pulumi_kubernetes/certificates/v1beta1/_inputs.py +292 -0
- pulumi_kubernetes/certificates/v1beta1/outputs.py +241 -0
- pulumi_kubernetes/coordination/v1/Lease.py +1 -3
- pulumi_kubernetes/coordination/v1/LeaseList.py +1 -3
- pulumi_kubernetes/coordination/v1/LeasePatch.py +1 -3
- pulumi_kubernetes/coordination/v1alpha1/LeaseCandidate.py +2 -4
- pulumi_kubernetes/coordination/v1alpha1/LeaseCandidateList.py +1 -3
- pulumi_kubernetes/coordination/v1alpha1/LeaseCandidatePatch.py +2 -4
- pulumi_kubernetes/coordination/v1alpha2/LeaseCandidate.py +2 -4
- pulumi_kubernetes/coordination/v1alpha2/LeaseCandidateList.py +1 -3
- pulumi_kubernetes/coordination/v1alpha2/LeaseCandidatePatch.py +2 -4
- pulumi_kubernetes/coordination/v1alpha2/_inputs.py +6 -6
- pulumi_kubernetes/coordination/v1alpha2/outputs.py +4 -4
- pulumi_kubernetes/coordination/v1beta1/Lease.py +1 -3
- pulumi_kubernetes/coordination/v1beta1/LeaseCandidate.py +218 -0
- pulumi_kubernetes/coordination/v1beta1/LeaseCandidateList.py +217 -0
- pulumi_kubernetes/coordination/v1beta1/LeaseCandidatePatch.py +230 -0
- pulumi_kubernetes/coordination/v1beta1/LeaseList.py +1 -3
- pulumi_kubernetes/coordination/v1beta1/LeasePatch.py +1 -3
- pulumi_kubernetes/coordination/v1beta1/__init__.py +3 -0
- pulumi_kubernetes/coordination/v1beta1/_inputs.py +371 -0
- pulumi_kubernetes/coordination/v1beta1/outputs.py +292 -0
- pulumi_kubernetes/core/v1/Binding.py +1 -3
- pulumi_kubernetes/core/v1/BindingPatch.py +1 -3
- pulumi_kubernetes/core/v1/ConfigMap.py +1 -3
- pulumi_kubernetes/core/v1/ConfigMapList.py +1 -3
- pulumi_kubernetes/core/v1/ConfigMapPatch.py +1 -3
- pulumi_kubernetes/core/v1/Endpoints.py +9 -3
- pulumi_kubernetes/core/v1/EndpointsList.py +3 -5
- pulumi_kubernetes/core/v1/EndpointsPatch.py +9 -3
- pulumi_kubernetes/core/v1/Event.py +1 -3
- pulumi_kubernetes/core/v1/EventList.py +1 -3
- pulumi_kubernetes/core/v1/EventPatch.py +1 -3
- pulumi_kubernetes/core/v1/LimitRange.py +1 -3
- pulumi_kubernetes/core/v1/LimitRangeList.py +1 -3
- pulumi_kubernetes/core/v1/LimitRangePatch.py +1 -3
- pulumi_kubernetes/core/v1/Namespace.py +1 -3
- pulumi_kubernetes/core/v1/NamespaceList.py +1 -3
- pulumi_kubernetes/core/v1/NamespacePatch.py +1 -3
- pulumi_kubernetes/core/v1/Node.py +1 -3
- pulumi_kubernetes/core/v1/NodeList.py +1 -3
- pulumi_kubernetes/core/v1/NodePatch.py +1 -3
- pulumi_kubernetes/core/v1/PersistentVolume.py +1 -3
- pulumi_kubernetes/core/v1/PersistentVolumeClaim.py +1 -3
- pulumi_kubernetes/core/v1/PersistentVolumeClaimList.py +1 -3
- pulumi_kubernetes/core/v1/PersistentVolumeClaimPatch.py +1 -3
- pulumi_kubernetes/core/v1/PersistentVolumeList.py +1 -3
- pulumi_kubernetes/core/v1/PersistentVolumePatch.py +1 -3
- pulumi_kubernetes/core/v1/Pod.py +1 -3
- pulumi_kubernetes/core/v1/PodList.py +1 -3
- pulumi_kubernetes/core/v1/PodPatch.py +1 -3
- pulumi_kubernetes/core/v1/PodTemplate.py +1 -3
- pulumi_kubernetes/core/v1/PodTemplateList.py +1 -3
- pulumi_kubernetes/core/v1/PodTemplatePatch.py +1 -3
- pulumi_kubernetes/core/v1/ReplicationController.py +1 -3
- pulumi_kubernetes/core/v1/ReplicationControllerList.py +1 -3
- pulumi_kubernetes/core/v1/ReplicationControllerPatch.py +1 -3
- pulumi_kubernetes/core/v1/ResourceQuota.py +1 -3
- pulumi_kubernetes/core/v1/ResourceQuotaList.py +1 -3
- pulumi_kubernetes/core/v1/ResourceQuotaPatch.py +1 -3
- pulumi_kubernetes/core/v1/Secret.py +1 -3
- pulumi_kubernetes/core/v1/SecretList.py +1 -3
- pulumi_kubernetes/core/v1/SecretPatch.py +1 -3
- pulumi_kubernetes/core/v1/Service.py +1 -3
- pulumi_kubernetes/core/v1/ServiceAccount.py +1 -3
- pulumi_kubernetes/core/v1/ServiceAccountList.py +1 -3
- pulumi_kubernetes/core/v1/ServiceAccountPatch.py +1 -3
- pulumi_kubernetes/core/v1/ServiceList.py +1 -3
- pulumi_kubernetes/core/v1/ServicePatch.py +1 -3
- pulumi_kubernetes/core/v1/_enums.py +2 -1
- pulumi_kubernetes/core/v1/_inputs.py +240 -66
- pulumi_kubernetes/core/v1/outputs.py +251 -51
- pulumi_kubernetes/discovery/v1/EndpointSlice.py +11 -13
- pulumi_kubernetes/discovery/v1/EndpointSliceList.py +1 -3
- pulumi_kubernetes/discovery/v1/EndpointSlicePatch.py +11 -13
- pulumi_kubernetes/discovery/v1/_inputs.py +159 -44
- pulumi_kubernetes/discovery/v1/outputs.py +107 -32
- pulumi_kubernetes/discovery/v1beta1/EndpointSlice.py +1 -3
- pulumi_kubernetes/discovery/v1beta1/EndpointSliceList.py +1 -3
- pulumi_kubernetes/discovery/v1beta1/EndpointSlicePatch.py +1 -3
- pulumi_kubernetes/events/v1/Event.py +1 -3
- pulumi_kubernetes/events/v1/EventList.py +1 -3
- pulumi_kubernetes/events/v1/EventPatch.py +1 -3
- pulumi_kubernetes/events/v1beta1/Event.py +1 -3
- pulumi_kubernetes/events/v1beta1/EventList.py +1 -3
- pulumi_kubernetes/events/v1beta1/EventPatch.py +1 -3
- pulumi_kubernetes/extensions/v1beta1/DaemonSet.py +1 -3
- pulumi_kubernetes/extensions/v1beta1/DaemonSetList.py +1 -3
- pulumi_kubernetes/extensions/v1beta1/DaemonSetPatch.py +1 -3
- pulumi_kubernetes/extensions/v1beta1/Deployment.py +1 -3
- pulumi_kubernetes/extensions/v1beta1/DeploymentList.py +1 -3
- pulumi_kubernetes/extensions/v1beta1/DeploymentPatch.py +1 -3
- pulumi_kubernetes/extensions/v1beta1/Ingress.py +1 -3
- pulumi_kubernetes/extensions/v1beta1/IngressList.py +1 -3
- pulumi_kubernetes/extensions/v1beta1/IngressPatch.py +1 -3
- pulumi_kubernetes/extensions/v1beta1/NetworkPolicy.py +1 -3
- pulumi_kubernetes/extensions/v1beta1/NetworkPolicyList.py +1 -3
- pulumi_kubernetes/extensions/v1beta1/NetworkPolicyPatch.py +1 -3
- pulumi_kubernetes/extensions/v1beta1/PodSecurityPolicy.py +1 -3
- pulumi_kubernetes/extensions/v1beta1/PodSecurityPolicyList.py +1 -3
- pulumi_kubernetes/extensions/v1beta1/PodSecurityPolicyPatch.py +1 -3
- pulumi_kubernetes/extensions/v1beta1/ReplicaSet.py +1 -3
- pulumi_kubernetes/extensions/v1beta1/ReplicaSetList.py +1 -3
- pulumi_kubernetes/extensions/v1beta1/ReplicaSetPatch.py +1 -3
- pulumi_kubernetes/flowcontrol/v1/FlowSchema.py +1 -3
- pulumi_kubernetes/flowcontrol/v1/FlowSchemaList.py +1 -3
- pulumi_kubernetes/flowcontrol/v1/FlowSchemaPatch.py +1 -3
- pulumi_kubernetes/flowcontrol/v1/PriorityLevelConfiguration.py +1 -3
- pulumi_kubernetes/flowcontrol/v1/PriorityLevelConfigurationList.py +1 -3
- pulumi_kubernetes/flowcontrol/v1/PriorityLevelConfigurationPatch.py +1 -3
- pulumi_kubernetes/flowcontrol/v1alpha1/FlowSchema.py +1 -3
- pulumi_kubernetes/flowcontrol/v1alpha1/FlowSchemaList.py +1 -3
- pulumi_kubernetes/flowcontrol/v1alpha1/FlowSchemaPatch.py +1 -3
- pulumi_kubernetes/flowcontrol/v1alpha1/PriorityLevelConfiguration.py +1 -3
- pulumi_kubernetes/flowcontrol/v1alpha1/PriorityLevelConfigurationList.py +1 -3
- pulumi_kubernetes/flowcontrol/v1alpha1/PriorityLevelConfigurationPatch.py +1 -3
- pulumi_kubernetes/flowcontrol/v1beta1/FlowSchema.py +1 -3
- pulumi_kubernetes/flowcontrol/v1beta1/FlowSchemaList.py +1 -3
- pulumi_kubernetes/flowcontrol/v1beta1/FlowSchemaPatch.py +1 -3
- pulumi_kubernetes/flowcontrol/v1beta1/PriorityLevelConfiguration.py +1 -3
- pulumi_kubernetes/flowcontrol/v1beta1/PriorityLevelConfigurationList.py +1 -3
- pulumi_kubernetes/flowcontrol/v1beta1/PriorityLevelConfigurationPatch.py +1 -3
- pulumi_kubernetes/flowcontrol/v1beta2/FlowSchema.py +1 -3
- pulumi_kubernetes/flowcontrol/v1beta2/FlowSchemaList.py +1 -3
- pulumi_kubernetes/flowcontrol/v1beta2/FlowSchemaPatch.py +1 -3
- pulumi_kubernetes/flowcontrol/v1beta2/PriorityLevelConfiguration.py +1 -3
- pulumi_kubernetes/flowcontrol/v1beta2/PriorityLevelConfigurationList.py +1 -3
- pulumi_kubernetes/flowcontrol/v1beta2/PriorityLevelConfigurationPatch.py +1 -3
- pulumi_kubernetes/flowcontrol/v1beta3/FlowSchema.py +1 -3
- pulumi_kubernetes/flowcontrol/v1beta3/FlowSchemaList.py +1 -3
- pulumi_kubernetes/flowcontrol/v1beta3/FlowSchemaPatch.py +1 -3
- pulumi_kubernetes/flowcontrol/v1beta3/PriorityLevelConfiguration.py +1 -3
- pulumi_kubernetes/flowcontrol/v1beta3/PriorityLevelConfigurationList.py +1 -3
- pulumi_kubernetes/flowcontrol/v1beta3/PriorityLevelConfigurationPatch.py +1 -3
- pulumi_kubernetes/helm/v3/Release.py +1 -3
- pulumi_kubernetes/helm/v4/Chart.py +1 -3
- pulumi_kubernetes/kustomize/v2/Directory.py +1 -3
- pulumi_kubernetes/meta/v1/Status.py +1 -3
- pulumi_kubernetes/meta/v1/StatusPatch.py +1 -3
- pulumi_kubernetes/networking/v1/IPAddress.py +218 -0
- pulumi_kubernetes/networking/v1/IPAddressList.py +217 -0
- pulumi_kubernetes/networking/v1/IPAddressPatch.py +230 -0
- pulumi_kubernetes/networking/v1/Ingress.py +1 -3
- pulumi_kubernetes/networking/v1/IngressClass.py +1 -3
- pulumi_kubernetes/networking/v1/IngressClassList.py +1 -3
- pulumi_kubernetes/networking/v1/IngressClassPatch.py +1 -3
- pulumi_kubernetes/networking/v1/IngressList.py +1 -3
- pulumi_kubernetes/networking/v1/IngressPatch.py +1 -3
- pulumi_kubernetes/networking/v1/NetworkPolicy.py +1 -3
- pulumi_kubernetes/networking/v1/NetworkPolicyList.py +1 -3
- pulumi_kubernetes/networking/v1/NetworkPolicyPatch.py +1 -3
- pulumi_kubernetes/networking/v1/ServiceCIDR.py +228 -0
- pulumi_kubernetes/networking/v1/ServiceCIDRList.py +217 -0
- pulumi_kubernetes/networking/v1/ServiceCIDRPatch.py +240 -0
- pulumi_kubernetes/networking/v1/__init__.py +6 -0
- pulumi_kubernetes/networking/v1/_inputs.py +599 -0
- pulumi_kubernetes/networking/v1/outputs.py +461 -0
- pulumi_kubernetes/networking/v1alpha1/ClusterCIDR.py +1 -3
- pulumi_kubernetes/networking/v1alpha1/ClusterCIDRList.py +1 -3
- pulumi_kubernetes/networking/v1alpha1/ClusterCIDRPatch.py +1 -3
- pulumi_kubernetes/networking/v1alpha1/IPAddress.py +2 -4
- pulumi_kubernetes/networking/v1alpha1/IPAddressList.py +1 -3
- pulumi_kubernetes/networking/v1alpha1/IPAddressPatch.py +2 -4
- pulumi_kubernetes/networking/v1alpha1/ServiceCIDR.py +2 -4
- pulumi_kubernetes/networking/v1alpha1/ServiceCIDRList.py +1 -3
- pulumi_kubernetes/networking/v1alpha1/ServiceCIDRPatch.py +2 -4
- pulumi_kubernetes/networking/v1beta1/IPAddress.py +2 -4
- pulumi_kubernetes/networking/v1beta1/IPAddressList.py +1 -3
- pulumi_kubernetes/networking/v1beta1/IPAddressPatch.py +2 -4
- pulumi_kubernetes/networking/v1beta1/Ingress.py +1 -3
- pulumi_kubernetes/networking/v1beta1/IngressClass.py +1 -3
- pulumi_kubernetes/networking/v1beta1/IngressClassList.py +1 -3
- pulumi_kubernetes/networking/v1beta1/IngressClassPatch.py +1 -3
- pulumi_kubernetes/networking/v1beta1/IngressList.py +1 -3
- pulumi_kubernetes/networking/v1beta1/IngressPatch.py +1 -3
- pulumi_kubernetes/networking/v1beta1/ServiceCIDR.py +2 -4
- pulumi_kubernetes/networking/v1beta1/ServiceCIDRList.py +1 -3
- pulumi_kubernetes/networking/v1beta1/ServiceCIDRPatch.py +2 -4
- pulumi_kubernetes/node/v1/RuntimeClass.py +1 -3
- pulumi_kubernetes/node/v1/RuntimeClassList.py +1 -3
- pulumi_kubernetes/node/v1/RuntimeClassPatch.py +1 -3
- pulumi_kubernetes/node/v1alpha1/RuntimeClass.py +1 -3
- pulumi_kubernetes/node/v1alpha1/RuntimeClassList.py +1 -3
- pulumi_kubernetes/node/v1alpha1/RuntimeClassPatch.py +1 -3
- pulumi_kubernetes/node/v1beta1/RuntimeClass.py +1 -3
- pulumi_kubernetes/node/v1beta1/RuntimeClassList.py +1 -3
- pulumi_kubernetes/node/v1beta1/RuntimeClassPatch.py +1 -3
- pulumi_kubernetes/policy/v1/PodDisruptionBudget.py +1 -3
- pulumi_kubernetes/policy/v1/PodDisruptionBudgetList.py +1 -3
- pulumi_kubernetes/policy/v1/PodDisruptionBudgetPatch.py +1 -3
- pulumi_kubernetes/policy/v1/_inputs.py +0 -12
- pulumi_kubernetes/policy/v1/outputs.py +0 -8
- pulumi_kubernetes/policy/v1beta1/PodDisruptionBudget.py +1 -3
- pulumi_kubernetes/policy/v1beta1/PodDisruptionBudgetList.py +1 -3
- pulumi_kubernetes/policy/v1beta1/PodDisruptionBudgetPatch.py +1 -3
- pulumi_kubernetes/policy/v1beta1/PodSecurityPolicy.py +1 -3
- pulumi_kubernetes/policy/v1beta1/PodSecurityPolicyList.py +1 -3
- pulumi_kubernetes/policy/v1beta1/PodSecurityPolicyPatch.py +1 -3
- pulumi_kubernetes/provider.py +1 -3
- pulumi_kubernetes/pulumi-plugin.json +1 -1
- pulumi_kubernetes/rbac/v1/ClusterRole.py +1 -3
- pulumi_kubernetes/rbac/v1/ClusterRoleBinding.py +1 -3
- pulumi_kubernetes/rbac/v1/ClusterRoleBindingList.py +1 -3
- pulumi_kubernetes/rbac/v1/ClusterRoleBindingPatch.py +1 -3
- pulumi_kubernetes/rbac/v1/ClusterRoleList.py +1 -3
- pulumi_kubernetes/rbac/v1/ClusterRolePatch.py +1 -3
- pulumi_kubernetes/rbac/v1/Role.py +1 -3
- pulumi_kubernetes/rbac/v1/RoleBinding.py +1 -3
- pulumi_kubernetes/rbac/v1/RoleBindingList.py +1 -3
- pulumi_kubernetes/rbac/v1/RoleBindingPatch.py +1 -3
- pulumi_kubernetes/rbac/v1/RoleList.py +1 -3
- pulumi_kubernetes/rbac/v1/RolePatch.py +1 -3
- pulumi_kubernetes/rbac/v1alpha1/ClusterRole.py +1 -3
- pulumi_kubernetes/rbac/v1alpha1/ClusterRoleBinding.py +1 -3
- pulumi_kubernetes/rbac/v1alpha1/ClusterRoleBindingList.py +1 -3
- pulumi_kubernetes/rbac/v1alpha1/ClusterRoleBindingPatch.py +1 -3
- pulumi_kubernetes/rbac/v1alpha1/ClusterRoleList.py +1 -3
- pulumi_kubernetes/rbac/v1alpha1/ClusterRolePatch.py +1 -3
- pulumi_kubernetes/rbac/v1alpha1/Role.py +1 -3
- pulumi_kubernetes/rbac/v1alpha1/RoleBinding.py +1 -3
- pulumi_kubernetes/rbac/v1alpha1/RoleBindingList.py +1 -3
- pulumi_kubernetes/rbac/v1alpha1/RoleBindingPatch.py +1 -3
- pulumi_kubernetes/rbac/v1alpha1/RoleList.py +1 -3
- pulumi_kubernetes/rbac/v1alpha1/RolePatch.py +1 -3
- pulumi_kubernetes/rbac/v1beta1/ClusterRole.py +1 -3
- pulumi_kubernetes/rbac/v1beta1/ClusterRoleBinding.py +1 -3
- pulumi_kubernetes/rbac/v1beta1/ClusterRoleBindingList.py +1 -3
- pulumi_kubernetes/rbac/v1beta1/ClusterRoleBindingPatch.py +1 -3
- pulumi_kubernetes/rbac/v1beta1/ClusterRoleList.py +1 -3
- pulumi_kubernetes/rbac/v1beta1/ClusterRolePatch.py +1 -3
- pulumi_kubernetes/rbac/v1beta1/Role.py +1 -3
- pulumi_kubernetes/rbac/v1beta1/RoleBinding.py +1 -3
- pulumi_kubernetes/rbac/v1beta1/RoleBindingList.py +1 -3
- pulumi_kubernetes/rbac/v1beta1/RoleBindingPatch.py +1 -3
- pulumi_kubernetes/rbac/v1beta1/RoleList.py +1 -3
- pulumi_kubernetes/rbac/v1beta1/RolePatch.py +1 -3
- pulumi_kubernetes/resource/__init__.py +3 -0
- pulumi_kubernetes/resource/v1alpha1/PodScheduling.py +1 -3
- pulumi_kubernetes/resource/v1alpha1/PodSchedulingList.py +1 -3
- pulumi_kubernetes/resource/v1alpha1/PodSchedulingPatch.py +1 -3
- pulumi_kubernetes/resource/v1alpha1/ResourceClaim.py +2 -4
- pulumi_kubernetes/resource/v1alpha1/ResourceClaimList.py +1 -3
- pulumi_kubernetes/resource/v1alpha1/ResourceClaimPatch.py +2 -4
- pulumi_kubernetes/resource/v1alpha1/ResourceClaimTemplate.py +2 -4
- pulumi_kubernetes/resource/v1alpha1/ResourceClaimTemplateList.py +1 -3
- pulumi_kubernetes/resource/v1alpha1/ResourceClaimTemplatePatch.py +2 -4
- pulumi_kubernetes/resource/v1alpha1/ResourceClass.py +1 -3
- pulumi_kubernetes/resource/v1alpha1/ResourceClassList.py +1 -3
- pulumi_kubernetes/resource/v1alpha1/ResourceClassPatch.py +1 -3
- pulumi_kubernetes/resource/v1alpha2/PodSchedulingContext.py +1 -3
- pulumi_kubernetes/resource/v1alpha2/PodSchedulingContextList.py +1 -3
- pulumi_kubernetes/resource/v1alpha2/PodSchedulingContextPatch.py +1 -3
- pulumi_kubernetes/resource/v1alpha2/ResourceClaim.py +2 -4
- pulumi_kubernetes/resource/v1alpha2/ResourceClaimList.py +1 -3
- pulumi_kubernetes/resource/v1alpha2/ResourceClaimParameters.py +1 -3
- pulumi_kubernetes/resource/v1alpha2/ResourceClaimParametersList.py +1 -3
- pulumi_kubernetes/resource/v1alpha2/ResourceClaimParametersPatch.py +1 -3
- pulumi_kubernetes/resource/v1alpha2/ResourceClaimPatch.py +2 -4
- pulumi_kubernetes/resource/v1alpha2/ResourceClaimTemplate.py +2 -4
- pulumi_kubernetes/resource/v1alpha2/ResourceClaimTemplateList.py +1 -3
- pulumi_kubernetes/resource/v1alpha2/ResourceClaimTemplatePatch.py +2 -4
- pulumi_kubernetes/resource/v1alpha2/ResourceClass.py +1 -3
- pulumi_kubernetes/resource/v1alpha2/ResourceClassList.py +1 -3
- pulumi_kubernetes/resource/v1alpha2/ResourceClassParameters.py +1 -3
- pulumi_kubernetes/resource/v1alpha2/ResourceClassParametersList.py +1 -3
- pulumi_kubernetes/resource/v1alpha2/ResourceClassParametersPatch.py +1 -3
- pulumi_kubernetes/resource/v1alpha2/ResourceClassPatch.py +1 -3
- pulumi_kubernetes/resource/v1alpha2/ResourceSlice.py +2 -4
- pulumi_kubernetes/resource/v1alpha2/ResourceSliceList.py +1 -3
- pulumi_kubernetes/resource/v1alpha2/ResourceSlicePatch.py +2 -4
- pulumi_kubernetes/resource/v1alpha3/DeviceClass.py +2 -4
- pulumi_kubernetes/resource/v1alpha3/DeviceClassList.py +1 -3
- pulumi_kubernetes/resource/v1alpha3/DeviceClassPatch.py +2 -4
- pulumi_kubernetes/resource/v1alpha3/DeviceTaintRule.py +225 -0
- pulumi_kubernetes/resource/v1alpha3/DeviceTaintRuleList.py +217 -0
- pulumi_kubernetes/resource/v1alpha3/DeviceTaintRulePatch.py +236 -0
- pulumi_kubernetes/resource/v1alpha3/PodSchedulingContext.py +1 -3
- pulumi_kubernetes/resource/v1alpha3/PodSchedulingContextList.py +1 -3
- pulumi_kubernetes/resource/v1alpha3/PodSchedulingContextPatch.py +1 -3
- pulumi_kubernetes/resource/v1alpha3/ResourceClaim.py +2 -4
- pulumi_kubernetes/resource/v1alpha3/ResourceClaimList.py +1 -3
- pulumi_kubernetes/resource/v1alpha3/ResourceClaimPatch.py +2 -4
- pulumi_kubernetes/resource/v1alpha3/ResourceClaimTemplate.py +2 -4
- pulumi_kubernetes/resource/v1alpha3/ResourceClaimTemplateList.py +1 -3
- pulumi_kubernetes/resource/v1alpha3/ResourceClaimTemplatePatch.py +2 -4
- pulumi_kubernetes/resource/v1alpha3/ResourceSlice.py +2 -4
- pulumi_kubernetes/resource/v1alpha3/ResourceSliceList.py +1 -3
- pulumi_kubernetes/resource/v1alpha3/ResourceSlicePatch.py +2 -4
- pulumi_kubernetes/resource/v1alpha3/__init__.py +3 -0
- pulumi_kubernetes/resource/v1alpha3/_inputs.py +2559 -213
- pulumi_kubernetes/resource/v1alpha3/outputs.py +2037 -256
- pulumi_kubernetes/resource/v1beta1/DeviceClass.py +2 -4
- pulumi_kubernetes/resource/v1beta1/DeviceClassList.py +1 -3
- pulumi_kubernetes/resource/v1beta1/DeviceClassPatch.py +2 -4
- pulumi_kubernetes/resource/v1beta1/ResourceClaim.py +2 -4
- pulumi_kubernetes/resource/v1beta1/ResourceClaimList.py +1 -3
- pulumi_kubernetes/resource/v1beta1/ResourceClaimPatch.py +2 -4
- pulumi_kubernetes/resource/v1beta1/ResourceClaimTemplate.py +2 -4
- pulumi_kubernetes/resource/v1beta1/ResourceClaimTemplateList.py +1 -3
- pulumi_kubernetes/resource/v1beta1/ResourceClaimTemplatePatch.py +2 -4
- pulumi_kubernetes/resource/v1beta1/ResourceSlice.py +2 -4
- pulumi_kubernetes/resource/v1beta1/ResourceSliceList.py +1 -3
- pulumi_kubernetes/resource/v1beta1/ResourceSlicePatch.py +2 -4
- pulumi_kubernetes/resource/v1beta1/_inputs.py +2044 -176
- pulumi_kubernetes/resource/v1beta1/outputs.py +1536 -134
- pulumi_kubernetes/resource/v1beta2/DeviceClass.py +239 -0
- pulumi_kubernetes/resource/v1beta2/DeviceClassList.py +217 -0
- pulumi_kubernetes/resource/v1beta2/DeviceClassPatch.py +250 -0
- pulumi_kubernetes/resource/v1beta2/ResourceClaim.py +234 -0
- pulumi_kubernetes/resource/v1beta2/ResourceClaimList.py +218 -0
- pulumi_kubernetes/resource/v1beta2/ResourceClaimPatch.py +245 -0
- pulumi_kubernetes/resource/v1beta2/ResourceClaimTemplate.py +231 -0
- pulumi_kubernetes/resource/v1beta2/ResourceClaimTemplateList.py +217 -0
- pulumi_kubernetes/resource/v1beta2/ResourceClaimTemplatePatch.py +242 -0
- pulumi_kubernetes/resource/v1beta2/ResourceSlice.py +248 -0
- pulumi_kubernetes/resource/v1beta2/ResourceSliceList.py +218 -0
- pulumi_kubernetes/resource/v1beta2/ResourceSlicePatch.py +259 -0
- pulumi_kubernetes/resource/v1beta2/__init__.py +22 -0
- pulumi_kubernetes/resource/v1beta2/_inputs.py +5681 -0
- pulumi_kubernetes/resource/v1beta2/outputs.py +4726 -0
- pulumi_kubernetes/scheduling/v1/PriorityClass.py +1 -3
- pulumi_kubernetes/scheduling/v1/PriorityClassList.py +1 -3
- pulumi_kubernetes/scheduling/v1/PriorityClassPatch.py +1 -3
- pulumi_kubernetes/scheduling/v1alpha1/PriorityClass.py +1 -3
- pulumi_kubernetes/scheduling/v1alpha1/PriorityClassList.py +1 -3
- pulumi_kubernetes/scheduling/v1alpha1/PriorityClassPatch.py +1 -3
- pulumi_kubernetes/scheduling/v1beta1/PriorityClass.py +1 -3
- pulumi_kubernetes/scheduling/v1beta1/PriorityClassList.py +1 -3
- pulumi_kubernetes/scheduling/v1beta1/PriorityClassPatch.py +1 -3
- pulumi_kubernetes/settings/v1alpha1/PodPreset.py +1 -3
- pulumi_kubernetes/settings/v1alpha1/PodPresetList.py +1 -3
- pulumi_kubernetes/settings/v1alpha1/PodPresetPatch.py +1 -3
- pulumi_kubernetes/storage/v1/CSIDriver.py +1 -3
- pulumi_kubernetes/storage/v1/CSIDriverList.py +1 -3
- pulumi_kubernetes/storage/v1/CSIDriverPatch.py +1 -3
- pulumi_kubernetes/storage/v1/CSINode.py +1 -3
- pulumi_kubernetes/storage/v1/CSINodeList.py +1 -3
- pulumi_kubernetes/storage/v1/CSINodePatch.py +1 -3
- pulumi_kubernetes/storage/v1/CSIStorageCapacity.py +1 -3
- pulumi_kubernetes/storage/v1/CSIStorageCapacityList.py +1 -3
- pulumi_kubernetes/storage/v1/CSIStorageCapacityPatch.py +1 -3
- pulumi_kubernetes/storage/v1/StorageClass.py +1 -3
- pulumi_kubernetes/storage/v1/StorageClassList.py +1 -3
- pulumi_kubernetes/storage/v1/StorageClassPatch.py +1 -3
- pulumi_kubernetes/storage/v1/VolumeAttachment.py +1 -3
- pulumi_kubernetes/storage/v1/VolumeAttachmentList.py +1 -3
- pulumi_kubernetes/storage/v1/VolumeAttachmentPatch.py +1 -3
- pulumi_kubernetes/storage/v1/_inputs.py +90 -0
- pulumi_kubernetes/storage/v1/outputs.py +110 -0
- pulumi_kubernetes/storage/v1alpha1/VolumeAttachment.py +1 -3
- pulumi_kubernetes/storage/v1alpha1/VolumeAttachmentList.py +1 -3
- pulumi_kubernetes/storage/v1alpha1/VolumeAttachmentPatch.py +1 -3
- pulumi_kubernetes/storage/v1alpha1/VolumeAttributesClass.py +1 -3
- pulumi_kubernetes/storage/v1alpha1/VolumeAttributesClassList.py +1 -3
- pulumi_kubernetes/storage/v1alpha1/VolumeAttributesClassPatch.py +1 -3
- pulumi_kubernetes/storage/v1beta1/CSIDriver.py +1 -3
- pulumi_kubernetes/storage/v1beta1/CSIDriverList.py +1 -3
- pulumi_kubernetes/storage/v1beta1/CSIDriverPatch.py +1 -3
- pulumi_kubernetes/storage/v1beta1/CSINode.py +1 -3
- pulumi_kubernetes/storage/v1beta1/CSINodeList.py +1 -3
- pulumi_kubernetes/storage/v1beta1/CSINodePatch.py +1 -3
- pulumi_kubernetes/storage/v1beta1/CSIStorageCapacity.py +1 -3
- pulumi_kubernetes/storage/v1beta1/CSIStorageCapacityList.py +1 -3
- pulumi_kubernetes/storage/v1beta1/CSIStorageCapacityPatch.py +1 -3
- pulumi_kubernetes/storage/v1beta1/StorageClass.py +1 -3
- pulumi_kubernetes/storage/v1beta1/StorageClassList.py +1 -3
- pulumi_kubernetes/storage/v1beta1/StorageClassPatch.py +1 -3
- pulumi_kubernetes/storage/v1beta1/VolumeAttachment.py +1 -3
- pulumi_kubernetes/storage/v1beta1/VolumeAttachmentList.py +1 -3
- pulumi_kubernetes/storage/v1beta1/VolumeAttachmentPatch.py +1 -3
- pulumi_kubernetes/storage/v1beta1/VolumeAttributesClass.py +1 -3
- pulumi_kubernetes/storage/v1beta1/VolumeAttributesClassList.py +1 -3
- pulumi_kubernetes/storage/v1beta1/VolumeAttributesClassPatch.py +1 -3
- pulumi_kubernetes/storagemigration/v1alpha1/StorageVersionMigration.py +1 -3
- pulumi_kubernetes/storagemigration/v1alpha1/StorageVersionMigrationList.py +1 -3
- pulumi_kubernetes/storagemigration/v1alpha1/StorageVersionMigrationPatch.py +1 -3
- pulumi_kubernetes/yaml/v2/ConfigFile.py +1 -3
- pulumi_kubernetes/yaml/v2/ConfigGroup.py +1 -3
- pulumi_kubernetes/yaml/yaml.py +108 -0
- {pulumi_kubernetes-4.23.0a1746131759.dist-info → pulumi_kubernetes-4.23.0a1746153578.dist-info}/METADATA +2 -2
- pulumi_kubernetes-4.23.0a1746153578.dist-info/RECORD +709 -0
- pulumi_kubernetes-4.23.0a1746131759.dist-info/RECORD +0 -679
- {pulumi_kubernetes-4.23.0a1746131759.dist-info → pulumi_kubernetes-4.23.0a1746153578.dist-info}/WHEEL +0 -0
- {pulumi_kubernetes-4.23.0a1746131759.dist-info → pulumi_kubernetes-4.23.0a1746153578.dist-info}/top_level.txt +0 -0
pulumi_kubernetes/certificates/v1beta1/outputs.py

@@ -25,6 +25,9 @@ __all__ = [
     'CertificateSigningRequestSpecPatch',
     'CertificateSigningRequestStatus',
     'CertificateSigningRequestStatusPatch',
+    'ClusterTrustBundle',
+    'ClusterTrustBundleSpec',
+    'ClusterTrustBundleSpecPatch',
 ]

 @pulumi.output_type
@@ -619,3 +622,241 @@ class CertificateSigningRequestStatusPatch(dict):
         return pulumi.get(self, "conditions")


+@pulumi.output_type
+class ClusterTrustBundle(dict):
+    """
+    ClusterTrustBundle is a cluster-scoped container for X.509 trust anchors (root certificates).
+
+    ClusterTrustBundle objects are considered to be readable by any authenticated user in the cluster, because they can be mounted by pods using the `clusterTrustBundle` projection. All service accounts have read access to ClusterTrustBundles by default. Users who only have namespace-level access to a cluster can read ClusterTrustBundles by impersonating a serviceaccount that they have access to.
+
+    It can be optionally associated with a particular assigner, in which case it contains one valid set of trust anchors for that signer. Signers may have multiple associated ClusterTrustBundles; each is an independent set of trust anchors for that signer. Admission control is used to enforce that only users with permissions on the signer can create or modify the corresponding bundle.
+    """
+    @staticmethod
+    def __key_warning(key: str):
+        suggest = None
+        if key == "apiVersion":
+            suggest = "api_version"
+
+        if suggest:
+            pulumi.log.warn(f"Key '{key}' not found in ClusterTrustBundle. Access the value via the '{suggest}' property getter instead.")
+
+    def __getitem__(self, key: str) -> Any:
+        ClusterTrustBundle.__key_warning(key)
+        return super().__getitem__(key)
+
+    def get(self, key: str, default = None) -> Any:
+        ClusterTrustBundle.__key_warning(key)
+        return super().get(key, default)
+
+    def __init__(__self__, *,
+                 spec: 'outputs.ClusterTrustBundleSpec',
+                 api_version: Optional[builtins.str] = None,
+                 kind: Optional[builtins.str] = None,
+                 metadata: Optional['_meta.v1.outputs.ObjectMeta'] = None):
+        """
+        ClusterTrustBundle is a cluster-scoped container for X.509 trust anchors (root certificates).
+
+        ClusterTrustBundle objects are considered to be readable by any authenticated user in the cluster, because they can be mounted by pods using the `clusterTrustBundle` projection. All service accounts have read access to ClusterTrustBundles by default. Users who only have namespace-level access to a cluster can read ClusterTrustBundles by impersonating a serviceaccount that they have access to.
+
+        It can be optionally associated with a particular assigner, in which case it contains one valid set of trust anchors for that signer. Signers may have multiple associated ClusterTrustBundles; each is an independent set of trust anchors for that signer. Admission control is used to enforce that only users with permissions on the signer can create or modify the corresponding bundle.
+        :param 'ClusterTrustBundleSpecArgs' spec: spec contains the signer (if any) and trust anchors.
+        :param builtins.str api_version: APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+        :param builtins.str kind: Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+        :param '_meta.v1.ObjectMetaArgs' metadata: metadata contains the object metadata.
+        """
+        pulumi.set(__self__, "spec", spec)
+        if api_version is not None:
+            pulumi.set(__self__, "api_version", 'certificates.k8s.io/v1beta1')
+        if kind is not None:
+            pulumi.set(__self__, "kind", 'ClusterTrustBundle')
+        if metadata is not None:
+            pulumi.set(__self__, "metadata", metadata)
+
+    @property
+    @pulumi.getter
+    def spec(self) -> 'outputs.ClusterTrustBundleSpec':
+        """
+        spec contains the signer (if any) and trust anchors.
+        """
+        return pulumi.get(self, "spec")
+
+    @property
+    @pulumi.getter(name="apiVersion")
+    def api_version(self) -> Optional[builtins.str]:
+        """
+        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+        """
+        return pulumi.get(self, "api_version")
+
+    @property
+    @pulumi.getter
+    def kind(self) -> Optional[builtins.str]:
+        """
+        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+        """
+        return pulumi.get(self, "kind")
+
+    @property
+    @pulumi.getter
+    def metadata(self) -> Optional['_meta.v1.outputs.ObjectMeta']:
+        """
+        metadata contains the object metadata.
+        """
+        return pulumi.get(self, "metadata")
+
+
+@pulumi.output_type
+class ClusterTrustBundleSpec(dict):
+    """
+    ClusterTrustBundleSpec contains the signer and trust anchors.
+    """
+    @staticmethod
+    def __key_warning(key: str):
+        suggest = None
+        if key == "trustBundle":
+            suggest = "trust_bundle"
+        elif key == "signerName":
+            suggest = "signer_name"
+
+        if suggest:
+            pulumi.log.warn(f"Key '{key}' not found in ClusterTrustBundleSpec. Access the value via the '{suggest}' property getter instead.")
+
+    def __getitem__(self, key: str) -> Any:
+        ClusterTrustBundleSpec.__key_warning(key)
+        return super().__getitem__(key)
+
+    def get(self, key: str, default = None) -> Any:
+        ClusterTrustBundleSpec.__key_warning(key)
+        return super().get(key, default)
+
+    def __init__(__self__, *,
+                 trust_bundle: builtins.str,
+                 signer_name: Optional[builtins.str] = None):
+        """
+        ClusterTrustBundleSpec contains the signer and trust anchors.
+        :param builtins.str trust_bundle: trustBundle contains the individual X.509 trust anchors for this bundle, as PEM bundle of PEM-wrapped, DER-formatted X.509 certificates.
+
+               The data must consist only of PEM certificate blocks that parse as valid X.509 certificates. Each certificate must include a basic constraints extension with the CA bit set. The API server will reject objects that contain duplicate certificates, or that use PEM block headers.
+
+               Users of ClusterTrustBundles, including Kubelet, are free to reorder and deduplicate certificate blocks in this file according to their own logic, as well as to drop PEM block headers and inter-block data.
+        :param builtins.str signer_name: signerName indicates the associated signer, if any.
+
+               In order to create or update a ClusterTrustBundle that sets signerName, you must have the following cluster-scoped permission: group=certificates.k8s.io resource=signers resourceName=<the signer name> verb=attest.
+
+               If signerName is not empty, then the ClusterTrustBundle object must be named with the signer name as a prefix (translating slashes to colons). For example, for the signer name `example.com/foo`, valid ClusterTrustBundle object names include `example.com:foo:abc` and `example.com:foo:v1`.
+
+               If signerName is empty, then the ClusterTrustBundle object's name must not have such a prefix.
+
+               List/watch requests for ClusterTrustBundles can filter on this field using a `spec.signerName=NAME` field selector.
+        """
+        pulumi.set(__self__, "trust_bundle", trust_bundle)
+        if signer_name is not None:
+            pulumi.set(__self__, "signer_name", signer_name)
+
+    @property
+    @pulumi.getter(name="trustBundle")
+    def trust_bundle(self) -> builtins.str:
+        """
+        trustBundle contains the individual X.509 trust anchors for this bundle, as PEM bundle of PEM-wrapped, DER-formatted X.509 certificates.
+
+        The data must consist only of PEM certificate blocks that parse as valid X.509 certificates. Each certificate must include a basic constraints extension with the CA bit set. The API server will reject objects that contain duplicate certificates, or that use PEM block headers.
+
+        Users of ClusterTrustBundles, including Kubelet, are free to reorder and deduplicate certificate blocks in this file according to their own logic, as well as to drop PEM block headers and inter-block data.
+        """
+        return pulumi.get(self, "trust_bundle")
+
+    @property
+    @pulumi.getter(name="signerName")
+    def signer_name(self) -> Optional[builtins.str]:
+        """
+        signerName indicates the associated signer, if any.
+
+        In order to create or update a ClusterTrustBundle that sets signerName, you must have the following cluster-scoped permission: group=certificates.k8s.io resource=signers resourceName=<the signer name> verb=attest.
+
+        If signerName is not empty, then the ClusterTrustBundle object must be named with the signer name as a prefix (translating slashes to colons). For example, for the signer name `example.com/foo`, valid ClusterTrustBundle object names include `example.com:foo:abc` and `example.com:foo:v1`.
+
+        If signerName is empty, then the ClusterTrustBundle object's name must not have such a prefix.
+
+        List/watch requests for ClusterTrustBundles can filter on this field using a `spec.signerName=NAME` field selector.
+        """
+        return pulumi.get(self, "signer_name")
+
+
+@pulumi.output_type
+class ClusterTrustBundleSpecPatch(dict):
+    """
+    ClusterTrustBundleSpec contains the signer and trust anchors.
+    """
+    @staticmethod
+    def __key_warning(key: str):
+        suggest = None
+        if key == "signerName":
+            suggest = "signer_name"
+        elif key == "trustBundle":
+            suggest = "trust_bundle"
+
+        if suggest:
+            pulumi.log.warn(f"Key '{key}' not found in ClusterTrustBundleSpecPatch. Access the value via the '{suggest}' property getter instead.")
+
+    def __getitem__(self, key: str) -> Any:
+        ClusterTrustBundleSpecPatch.__key_warning(key)
+        return super().__getitem__(key)
+
+    def get(self, key: str, default = None) -> Any:
+        ClusterTrustBundleSpecPatch.__key_warning(key)
+        return super().get(key, default)
+
+    def __init__(__self__, *,
+                 signer_name: Optional[builtins.str] = None,
+                 trust_bundle: Optional[builtins.str] = None):
+        """
+        ClusterTrustBundleSpec contains the signer and trust anchors.
+        :param builtins.str signer_name: signerName indicates the associated signer, if any.
+
+               In order to create or update a ClusterTrustBundle that sets signerName, you must have the following cluster-scoped permission: group=certificates.k8s.io resource=signers resourceName=<the signer name> verb=attest.
+
+               If signerName is not empty, then the ClusterTrustBundle object must be named with the signer name as a prefix (translating slashes to colons). For example, for the signer name `example.com/foo`, valid ClusterTrustBundle object names include `example.com:foo:abc` and `example.com:foo:v1`.
+
+               If signerName is empty, then the ClusterTrustBundle object's name must not have such a prefix.
+
+               List/watch requests for ClusterTrustBundles can filter on this field using a `spec.signerName=NAME` field selector.
+        :param builtins.str trust_bundle: trustBundle contains the individual X.509 trust anchors for this bundle, as PEM bundle of PEM-wrapped, DER-formatted X.509 certificates.
+
+               The data must consist only of PEM certificate blocks that parse as valid X.509 certificates. Each certificate must include a basic constraints extension with the CA bit set. The API server will reject objects that contain duplicate certificates, or that use PEM block headers.
+
+               Users of ClusterTrustBundles, including Kubelet, are free to reorder and deduplicate certificate blocks in this file according to their own logic, as well as to drop PEM block headers and inter-block data.
+        """
+        if signer_name is not None:
+            pulumi.set(__self__, "signer_name", signer_name)
+        if trust_bundle is not None:
+            pulumi.set(__self__, "trust_bundle", trust_bundle)
+
+    @property
+    @pulumi.getter(name="signerName")
+    def signer_name(self) -> Optional[builtins.str]:
+        """
+        signerName indicates the associated signer, if any.
+
+        In order to create or update a ClusterTrustBundle that sets signerName, you must have the following cluster-scoped permission: group=certificates.k8s.io resource=signers resourceName=<the signer name> verb=attest.
+
+        If signerName is not empty, then the ClusterTrustBundle object must be named with the signer name as a prefix (translating slashes to colons). For example, for the signer name `example.com/foo`, valid ClusterTrustBundle object names include `example.com:foo:abc` and `example.com:foo:v1`.
+
+        If signerName is empty, then the ClusterTrustBundle object's name must not have such a prefix.
+
+        List/watch requests for ClusterTrustBundles can filter on this field using a `spec.signerName=NAME` field selector.
+        """
+        return pulumi.get(self, "signer_name")
+
+    @property
+    @pulumi.getter(name="trustBundle")
+    def trust_bundle(self) -> Optional[builtins.str]:
+        """
+        trustBundle contains the individual X.509 trust anchors for this bundle, as PEM bundle of PEM-wrapped, DER-formatted X.509 certificates.
+
+        The data must consist only of PEM certificate blocks that parse as valid X.509 certificates. Each certificate must include a basic constraints extension with the CA bit set. The API server will reject objects that contain duplicate certificates, or that use PEM block headers.
+
+        Users of ClusterTrustBundles, including Kubelet, are free to reorder and deduplicate certificate blocks in this file according to their own logic, as well as to drop PEM block headers and inter-block data.
+        """
+        return pulumi.get(self, "trust_bundle")
+
+
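The file list above also adds certificates/v1beta1/ClusterTrustBundle.py, ClusterTrustBundleList.py, ClusterTrustBundlePatch.py and _inputs.py, so the new bundle type can be declared directly from a Pulumi program. A minimal sketch, assuming the generated resource follows the usual metadata/spec constructor shape used elsewhere in this SDK; the resource name, signer name, and PEM content below are placeholders:

import pulumi_kubernetes as k8s

# Hypothetical example: publish a cluster-wide trust bundle for the signer "example.com/foo".
# Per the spec documentation above, an object that sets signerName must be named with the
# signer name as a prefix, with slashes translated to colons.
bundle = k8s.certificates.v1beta1.ClusterTrustBundle(
    "example-bundle",
    metadata=k8s.meta.v1.ObjectMetaArgs(name="example.com:foo:ca1"),
    spec=k8s.certificates.v1beta1.ClusterTrustBundleSpecArgs(
        signer_name="example.com/foo",
        trust_bundle="-----BEGIN CERTIFICATE-----\n...placeholder PEM...\n-----END CERTIFICATE-----\n",
    ),
)

Pods would then consume the bundle through the clusterTrustBundle projected volume source mentioned in the docstring.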
pulumi_kubernetes/coordination/v1/Lease.py

@@ -92,10 +92,8 @@ class LeaseInitArgs:
         pulumi.set(self, "spec", value)


+@pulumi.type_token("kubernetes:coordination.k8s.io/v1:Lease")
 class Lease(pulumi.CustomResource):
-
-    pulumi_type = "kubernetes:coordination.k8s.io/v1:Lease"
-
     @overload
     def __init__(__self__,
                  resource_name: str,
pulumi_kubernetes/coordination/v1/LeaseList.py

@@ -91,10 +91,8 @@ class LeaseListArgs:
         pulumi.set(self, "metadata", value)


+@pulumi.type_token("kubernetes:coordination.k8s.io/v1:LeaseList")
 class LeaseList(pulumi.CustomResource):
-
-    pulumi_type = "kubernetes:coordination.k8s.io/v1:LeaseList"
-
     @overload
     def __init__(__self__,
                  resource_name: str,
pulumi_kubernetes/coordination/v1/LeasePatch.py

@@ -92,10 +92,8 @@ class LeasePatchArgs:
         pulumi.set(self, "spec", value)


+@pulumi.type_token("kubernetes:coordination.k8s.io/v1:LeasePatch")
 class LeasePatch(pulumi.CustomResource):
-
-    pulumi_type = "kubernetes:coordination.k8s.io/v1:LeasePatch"
-
     @overload
     def __init__(__self__,
                  resource_name: str,
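The three hunks above are the same mechanical refactor that likely accounts for the long run of "+1 -3" entries in the file list: the class-level pulumi_type attribute is dropped in favor of a @pulumi.type_token(...) decorator carrying the same token string. The public constructors are untouched by these hunks, so resources are declared exactly as before; a sketch for coordination/v1 Lease, with illustrative names and spec values:

import pulumi_kubernetes as k8s

# Illustrative only: the pulumi_type -> @pulumi.type_token refactor does not change call sites.
lease = k8s.coordination.v1.Lease(
    "worker-lock",
    metadata=k8s.meta.v1.ObjectMetaArgs(namespace="default", name="worker-lock"),
    spec=k8s.coordination.v1.LeaseSpecArgs(
        holder_identity="worker-0",     # current holder of the lease
        lease_duration_seconds=15,      # how long the holder may hold the lease before renewal
    ),
)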
pulumi_kubernetes/coordination/v1alpha1/LeaseCandidate.py

@@ -92,10 +92,8 @@ class LeaseCandidateInitArgs:
         pulumi.set(self, "spec", value)


+@pulumi.type_token("kubernetes:coordination.k8s.io/v1alpha1:LeaseCandidate")
 class LeaseCandidate(pulumi.CustomResource):
-
-    pulumi_type = "kubernetes:coordination.k8s.io/v1alpha1:LeaseCandidate"
-
     @overload
     def __init__(__self__,
                  resource_name: str,
@@ -156,7 +154,7 @@ class LeaseCandidate(pulumi.CustomResource):
             __props__.__dict__["kind"] = 'LeaseCandidate'
             __props__.__dict__["metadata"] = metadata
             __props__.__dict__["spec"] = spec
-        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="kubernetes:coordination.k8s.io/v1alpha2:LeaseCandidate")])
+        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="kubernetes:coordination.k8s.io/v1alpha2:LeaseCandidate"), pulumi.Alias(type_="kubernetes:coordination.k8s.io/v1beta1:LeaseCandidate")])
         opts = pulumi.ResourceOptions.merge(opts, alias_opts)
         super(LeaseCandidate, __self__).__init__(
             'kubernetes:coordination.k8s.io/v1alpha1:LeaseCandidate',
pulumi_kubernetes/coordination/v1alpha1/LeaseCandidateList.py

@@ -91,10 +91,8 @@ class LeaseCandidateListArgs:
         pulumi.set(self, "metadata", value)


+@pulumi.type_token("kubernetes:coordination.k8s.io/v1alpha1:LeaseCandidateList")
 class LeaseCandidateList(pulumi.CustomResource):
-
-    pulumi_type = "kubernetes:coordination.k8s.io/v1alpha1:LeaseCandidateList"
-
     @overload
     def __init__(__self__,
                  resource_name: str,
pulumi_kubernetes/coordination/v1alpha1/LeaseCandidatePatch.py

@@ -92,10 +92,8 @@ class LeaseCandidatePatchArgs:
         pulumi.set(self, "spec", value)


+@pulumi.type_token("kubernetes:coordination.k8s.io/v1alpha1:LeaseCandidatePatch")
 class LeaseCandidatePatch(pulumi.CustomResource):
-
-    pulumi_type = "kubernetes:coordination.k8s.io/v1alpha1:LeaseCandidatePatch"
-
     @overload
     def __init__(__self__,
                  resource_name: str,
@@ -168,7 +166,7 @@ class LeaseCandidatePatch(pulumi.CustomResource):
             __props__.__dict__["kind"] = 'LeaseCandidate'
             __props__.__dict__["metadata"] = metadata
             __props__.__dict__["spec"] = spec
-        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="kubernetes:coordination.k8s.io/v1alpha2:LeaseCandidatePatch")])
+        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="kubernetes:coordination.k8s.io/v1alpha2:LeaseCandidatePatch"), pulumi.Alias(type_="kubernetes:coordination.k8s.io/v1beta1:LeaseCandidatePatch")])
         opts = pulumi.ResourceOptions.merge(opts, alias_opts)
         super(LeaseCandidatePatch, __self__).__init__(
             'kubernetes:coordination.k8s.io/v1alpha1:LeaseCandidatePatch',
pulumi_kubernetes/coordination/v1alpha2/LeaseCandidate.py

@@ -92,10 +92,8 @@ class LeaseCandidateInitArgs:
         pulumi.set(self, "spec", value)


+@pulumi.type_token("kubernetes:coordination.k8s.io/v1alpha2:LeaseCandidate")
 class LeaseCandidate(pulumi.CustomResource):
-
-    pulumi_type = "kubernetes:coordination.k8s.io/v1alpha2:LeaseCandidate"
-
     @overload
     def __init__(__self__,
                  resource_name: str,
@@ -156,7 +154,7 @@ class LeaseCandidate(pulumi.CustomResource):
             __props__.__dict__["kind"] = 'LeaseCandidate'
             __props__.__dict__["metadata"] = metadata
             __props__.__dict__["spec"] = spec
-        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="kubernetes:coordination.k8s.io/v1alpha1:LeaseCandidate")])
+        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="kubernetes:coordination.k8s.io/v1alpha1:LeaseCandidate"), pulumi.Alias(type_="kubernetes:coordination.k8s.io/v1beta1:LeaseCandidate")])
         opts = pulumi.ResourceOptions.merge(opts, alias_opts)
         super(LeaseCandidate, __self__).__init__(
             'kubernetes:coordination.k8s.io/v1alpha2:LeaseCandidate',
pulumi_kubernetes/coordination/v1alpha2/LeaseCandidateList.py

@@ -91,10 +91,8 @@ class LeaseCandidateListArgs:
         pulumi.set(self, "metadata", value)


+@pulumi.type_token("kubernetes:coordination.k8s.io/v1alpha2:LeaseCandidateList")
 class LeaseCandidateList(pulumi.CustomResource):
-
-    pulumi_type = "kubernetes:coordination.k8s.io/v1alpha2:LeaseCandidateList"
-
     @overload
     def __init__(__self__,
                  resource_name: str,
@@ -92,10 +92,8 @@ class LeaseCandidatePatchArgs:
|
|
|
92
92
|
pulumi.set(self, "spec", value)
|
|
93
93
|
|
|
94
94
|
|
|
95
|
+
@pulumi.type_token("kubernetes:coordination.k8s.io/v1alpha2:LeaseCandidatePatch")
|
|
95
96
|
class LeaseCandidatePatch(pulumi.CustomResource):
|
|
96
|
-
|
|
97
|
-
pulumi_type = "kubernetes:coordination.k8s.io/v1alpha2:LeaseCandidatePatch"
|
|
98
|
-
|
|
99
97
|
@overload
|
|
100
98
|
def __init__(__self__,
|
|
101
99
|
resource_name: str,
|
|
@@ -168,7 +166,7 @@ class LeaseCandidatePatch(pulumi.CustomResource):
             __props__.__dict__["kind"] = 'LeaseCandidate'
             __props__.__dict__["metadata"] = metadata
             __props__.__dict__["spec"] = spec
-        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="kubernetes:coordination.k8s.io/v1alpha1:LeaseCandidatePatch")])
+        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="kubernetes:coordination.k8s.io/v1alpha1:LeaseCandidatePatch"), pulumi.Alias(type_="kubernetes:coordination.k8s.io/v1beta1:LeaseCandidatePatch")])
         opts = pulumi.ResourceOptions.merge(opts, alias_opts)
         super(LeaseCandidatePatch, __self__).__init__(
             'kubernetes:coordination.k8s.io/v1alpha2:LeaseCandidatePatch',
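For the Patch variant the same alias logic applies; the resource targets an existing LeaseCandidate by name and applies only the fields that are set. A hypothetical sketch, assuming the usual pulumi_kubernetes layout; the force-apply annotation is optional and only needed to take ownership of conflicting fields.

import pulumi_kubernetes as k8s

# Hypothetical patch: adjust only the strategy of an existing LeaseCandidate.
strategy_patch = k8s.coordination.v1alpha2.LeaseCandidatePatch(
    "example-strategy-patch",
    metadata=k8s.meta.v1.ObjectMetaPatchArgs(
        name="example-candidate",
        namespace="kube-system",
        annotations={"pulumi.com/patchForce": "true"},  # optional: force server-side apply
    ),
    spec=k8s.coordination.v1alpha2.LeaseCandidateSpecPatchArgs(
        strategy="OldestEmulationVersion",
    ),
)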
@@ -54,7 +54,7 @@ if not MYPY:
         """
         strategy: NotRequired[pulumi.Input[builtins.str]]
         """
-        Strategy is the strategy that coordinated leader election will use for picking the leader. If multiple candidates for the same Lease return different strategies, the strategy provided by the candidate with the latest BinaryVersion will be used. If there is still conflict, this is a user error and coordinated leader election will not operate the Lease until resolved.
+        Strategy is the strategy that coordinated leader election will use for picking the leader. If multiple candidates for the same Lease return different strategies, the strategy provided by the candidate with the latest BinaryVersion will be used. If there is still conflict, this is a user error and coordinated leader election will not operate the Lease until resolved.
         """
 elif False:
     LeaseCandidateSpecPatchArgsDict: TypeAlias = Mapping[str, Any]
@@ -75,7 +75,7 @@ class LeaseCandidateSpecPatchArgs:
         :param pulumi.Input[builtins.str] lease_name: LeaseName is the name of the lease for which this candidate is contending. This field is immutable.
         :param pulumi.Input[builtins.str] ping_time: PingTime is the last time that the server has requested the LeaseCandidate to renew. It is only done during leader election to check if any LeaseCandidates have become ineligible. When PingTime is updated, the LeaseCandidate will respond by updating RenewTime.
         :param pulumi.Input[builtins.str] renew_time: RenewTime is the time that the LeaseCandidate was last updated. Any time a Lease needs to do leader election, the PingTime field is updated to signal to the LeaseCandidate that they should update the RenewTime. Old LeaseCandidate objects are also garbage collected if it has been hours since the last renew. The PingTime field is updated regularly to prevent garbage collection for still active LeaseCandidates.
-        :param pulumi.Input[builtins.str] strategy: Strategy is the strategy that coordinated leader election will use for picking the leader. If multiple candidates for the same Lease return different strategies, the strategy provided by the candidate with the latest BinaryVersion will be used. If there is still conflict, this is a user error and coordinated leader election will not operate the Lease until resolved.
+        :param pulumi.Input[builtins.str] strategy: Strategy is the strategy that coordinated leader election will use for picking the leader. If multiple candidates for the same Lease return different strategies, the strategy provided by the candidate with the latest BinaryVersion will be used. If there is still conflict, this is a user error and coordinated leader election will not operate the Lease until resolved.
         """
         if binary_version is not None:
             pulumi.set(__self__, "binary_version", binary_version)
@@ -154,7 +154,7 @@ class LeaseCandidateSpecPatchArgs:
     @pulumi.getter
     def strategy(self) -> Optional[pulumi.Input[builtins.str]]:
         """
-        Strategy is the strategy that coordinated leader election will use for picking the leader. If multiple candidates for the same Lease return different strategies, the strategy provided by the candidate with the latest BinaryVersion will be used. If there is still conflict, this is a user error and coordinated leader election will not operate the Lease until resolved.
+        Strategy is the strategy that coordinated leader election will use for picking the leader. If multiple candidates for the same Lease return different strategies, the strategy provided by the candidate with the latest BinaryVersion will be used. If there is still conflict, this is a user error and coordinated leader election will not operate the Lease until resolved.
         """
         return pulumi.get(self, "strategy")
 
@@ -178,7 +178,7 @@ if not MYPY:
         """
         strategy: pulumi.Input[builtins.str]
         """
-        Strategy is the strategy that coordinated leader election will use for picking the leader. If multiple candidates for the same Lease return different strategies, the strategy provided by the candidate with the latest BinaryVersion will be used. If there is still conflict, this is a user error and coordinated leader election will not operate the Lease until resolved.
+        Strategy is the strategy that coordinated leader election will use for picking the leader. If multiple candidates for the same Lease return different strategies, the strategy provided by the candidate with the latest BinaryVersion will be used. If there is still conflict, this is a user error and coordinated leader election will not operate the Lease until resolved.
         """
         emulation_version: NotRequired[pulumi.Input[builtins.str]]
         """
@@ -208,7 +208,7 @@ class LeaseCandidateSpecArgs:
         LeaseCandidateSpec is a specification of a Lease.
         :param pulumi.Input[builtins.str] binary_version: BinaryVersion is the binary version. It must be in a semver format without leading `v`. This field is required.
         :param pulumi.Input[builtins.str] lease_name: LeaseName is the name of the lease for which this candidate is contending. This field is immutable.
-        :param pulumi.Input[builtins.str] strategy: Strategy is the strategy that coordinated leader election will use for picking the leader. If multiple candidates for the same Lease return different strategies, the strategy provided by the candidate with the latest BinaryVersion will be used. If there is still conflict, this is a user error and coordinated leader election will not operate the Lease until resolved.
+        :param pulumi.Input[builtins.str] strategy: Strategy is the strategy that coordinated leader election will use for picking the leader. If multiple candidates for the same Lease return different strategies, the strategy provided by the candidate with the latest BinaryVersion will be used. If there is still conflict, this is a user error and coordinated leader election will not operate the Lease until resolved.
         :param pulumi.Input[builtins.str] emulation_version: EmulationVersion is the emulation version. It must be in a semver format without leading `v`. EmulationVersion must be less than or equal to BinaryVersion. This field is required when strategy is "OldestEmulationVersion"
         :param pulumi.Input[builtins.str] ping_time: PingTime is the last time that the server has requested the LeaseCandidate to renew. It is only done during leader election to check if any LeaseCandidates have become ineligible. When PingTime is updated, the LeaseCandidate will respond by updating RenewTime.
         :param pulumi.Input[builtins.str] renew_time: RenewTime is the time that the LeaseCandidate was last updated. Any time a Lease needs to do leader election, the PingTime field is updated to signal to the LeaseCandidate that they should update the RenewTime. Old LeaseCandidate objects are also garbage collected if it has been hours since the last renew. The PingTime field is updated regularly to prevent garbage collection for still active LeaseCandidates.
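The hunks above and the remaining ones below only re-emit the strategy field documentation; the input shapes themselves are unchanged. Because the generated typed-dict hints (such as LeaseCandidateSpecArgsDict shown earlier) accompany the typed args classes, the same spec can also be passed as a plain mapping. A small hedged sketch with illustrative values follows.

import pulumi_kubernetes as k8s

# Dict-typed input form matching the generated *ArgsDict hints.
candidate = k8s.coordination.v1alpha2.LeaseCandidate(
    "example-candidate-dict",
    spec={
        "binary_version": "1.33.0",
        "lease_name": "example-lease",
        "strategy": "OldestEmulationVersion",
        "emulation_version": "1.33.0",
    },
)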
@@ -251,7 +251,7 @@ class LeaseCandidateSpecArgs:
     @pulumi.getter
     def strategy(self) -> pulumi.Input[builtins.str]:
         """
-        Strategy is the strategy that coordinated leader election will use for picking the leader. If multiple candidates for the same Lease return different strategies, the strategy provided by the candidate with the latest BinaryVersion will be used. If there is still conflict, this is a user error and coordinated leader election will not operate the Lease until resolved.
+        Strategy is the strategy that coordinated leader election will use for picking the leader. If multiple candidates for the same Lease return different strategies, the strategy provided by the candidate with the latest BinaryVersion will be used. If there is still conflict, this is a user error and coordinated leader election will not operate the Lease until resolved.
         """
         return pulumi.get(self, "strategy")
 
@@ -140,7 +140,7 @@ class LeaseCandidateSpec(dict):
         LeaseCandidateSpec is a specification of a Lease.
         :param builtins.str binary_version: BinaryVersion is the binary version. It must be in a semver format without leading `v`. This field is required.
         :param builtins.str lease_name: LeaseName is the name of the lease for which this candidate is contending. This field is immutable.
-        :param builtins.str strategy: Strategy is the strategy that coordinated leader election will use for picking the leader. If multiple candidates for the same Lease return different strategies, the strategy provided by the candidate with the latest BinaryVersion will be used. If there is still conflict, this is a user error and coordinated leader election will not operate the Lease until resolved.
+        :param builtins.str strategy: Strategy is the strategy that coordinated leader election will use for picking the leader. If multiple candidates for the same Lease return different strategies, the strategy provided by the candidate with the latest BinaryVersion will be used. If there is still conflict, this is a user error and coordinated leader election will not operate the Lease until resolved.
         :param builtins.str emulation_version: EmulationVersion is the emulation version. It must be in a semver format without leading `v`. EmulationVersion must be less than or equal to BinaryVersion. This field is required when strategy is "OldestEmulationVersion"
         :param builtins.str ping_time: PingTime is the last time that the server has requested the LeaseCandidate to renew. It is only done during leader election to check if any LeaseCandidates have become ineligible. When PingTime is updated, the LeaseCandidate will respond by updating RenewTime.
         :param builtins.str renew_time: RenewTime is the time that the LeaseCandidate was last updated. Any time a Lease needs to do leader election, the PingTime field is updated to signal to the LeaseCandidate that they should update the RenewTime. Old LeaseCandidate objects are also garbage collected if it has been hours since the last renew. The PingTime field is updated regularly to prevent garbage collection for still active LeaseCandidates.
@@ -175,7 +175,7 @@ class LeaseCandidateSpec(dict):
     @pulumi.getter
     def strategy(self) -> builtins.str:
         """
-        Strategy is the strategy that coordinated leader election will use for picking the leader. If multiple candidates for the same Lease return different strategies, the strategy provided by the candidate with the latest BinaryVersion will be used. If there is still conflict, this is a user error and coordinated leader election will not operate the Lease until resolved.
+        Strategy is the strategy that coordinated leader election will use for picking the leader. If multiple candidates for the same Lease return different strategies, the strategy provided by the candidate with the latest BinaryVersion will be used. If there is still conflict, this is a user error and coordinated leader election will not operate the Lease until resolved.
         """
         return pulumi.get(self, "strategy")
 
@@ -248,7 +248,7 @@ class LeaseCandidateSpecPatch(dict):
         :param builtins.str lease_name: LeaseName is the name of the lease for which this candidate is contending. This field is immutable.
         :param builtins.str ping_time: PingTime is the last time that the server has requested the LeaseCandidate to renew. It is only done during leader election to check if any LeaseCandidates have become ineligible. When PingTime is updated, the LeaseCandidate will respond by updating RenewTime.
         :param builtins.str renew_time: RenewTime is the time that the LeaseCandidate was last updated. Any time a Lease needs to do leader election, the PingTime field is updated to signal to the LeaseCandidate that they should update the RenewTime. Old LeaseCandidate objects are also garbage collected if it has been hours since the last renew. The PingTime field is updated regularly to prevent garbage collection for still active LeaseCandidates.
-        :param builtins.str strategy: Strategy is the strategy that coordinated leader election will use for picking the leader. If multiple candidates for the same Lease return different strategies, the strategy provided by the candidate with the latest BinaryVersion will be used. If there is still conflict, this is a user error and coordinated leader election will not operate the Lease until resolved.
+        :param builtins.str strategy: Strategy is the strategy that coordinated leader election will use for picking the leader. If multiple candidates for the same Lease return different strategies, the strategy provided by the candidate with the latest BinaryVersion will be used. If there is still conflict, this is a user error and coordinated leader election will not operate the Lease until resolved.
         """
         if binary_version is not None:
             pulumi.set(__self__, "binary_version", binary_version)
@@ -307,7 +307,7 @@ class LeaseCandidateSpecPatch(dict):
     @pulumi.getter
     def strategy(self) -> Optional[builtins.str]:
         """
-        Strategy is the strategy that coordinated leader election will use for picking the leader. If multiple candidates for the same Lease return different strategies, the strategy provided by the candidate with the latest BinaryVersion will be used. If there is still conflict, this is a user error and coordinated leader election will not operate the Lease until resolved.
+        Strategy is the strategy that coordinated leader election will use for picking the leader. If multiple candidates for the same Lease return different strategies, the strategy provided by the candidate with the latest BinaryVersion will be used. If there is still conflict, this is a user error and coordinated leader election will not operate the Lease until resolved.
         """
         return pulumi.get(self, "strategy")
 
@@ -92,10 +92,8 @@ class LeaseInitArgs:
         pulumi.set(self, "spec", value)
 
 
+@pulumi.type_token("kubernetes:coordination.k8s.io/v1beta1:Lease")
 class Lease(pulumi.CustomResource):
-
-    pulumi_type = "kubernetes:coordination.k8s.io/v1beta1:Lease"
-
     @overload
     def __init__(__self__,
                  resource_name: str,