pulumi-vsphere 4.10.3a1723624830__py3-none-any.whl → 4.11.0a1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of pulumi-vsphere might be problematic. See the file-by-file changes below for details.

Files changed (47)
  1. pulumi_vsphere/_inputs.py +6 -12
  2. pulumi_vsphere/_utilities.py +4 -40
  3. pulumi_vsphere/compute_cluster.py +20 -20
  4. pulumi_vsphere/compute_cluster_vm_affinity_rule.py +18 -14
  5. pulumi_vsphere/content_library.py +10 -10
  6. pulumi_vsphere/datacenter.py +28 -7
  7. pulumi_vsphere/datastore_cluster.py +14 -0
  8. pulumi_vsphere/distributed_port_group.py +12 -61
  9. pulumi_vsphere/distributed_virtual_switch.py +43 -22
  10. pulumi_vsphere/entity_permissions.py +38 -59
  11. pulumi_vsphere/folder.py +21 -0
  12. pulumi_vsphere/get_compute_cluster_host_group.py +16 -18
  13. pulumi_vsphere/get_content_library.py +6 -10
  14. pulumi_vsphere/get_content_library_item.py +8 -12
  15. pulumi_vsphere/get_datastore.py +9 -9
  16. pulumi_vsphere/get_datastore_stats.py +32 -34
  17. pulumi_vsphere/get_dynamic.py +12 -14
  18. pulumi_vsphere/get_guest_os_customization.py +43 -8
  19. pulumi_vsphere/get_host_base_images.py +6 -6
  20. pulumi_vsphere/get_host_pci_device.py +2 -4
  21. pulumi_vsphere/get_host_thumbprint.py +12 -12
  22. pulumi_vsphere/get_host_vgpu_profile.py +2 -4
  23. pulumi_vsphere/get_license.py +1 -2
  24. pulumi_vsphere/get_network.py +14 -14
  25. pulumi_vsphere/get_resource_pool.py +8 -12
  26. pulumi_vsphere/get_role.py +4 -4
  27. pulumi_vsphere/get_virtual_machine.py +35 -60
  28. pulumi_vsphere/guest_os_customization.py +31 -31
  29. pulumi_vsphere/host_port_group.py +2 -2
  30. pulumi_vsphere/nas_datastore.py +7 -7
  31. pulumi_vsphere/offline_software_depot.py +2 -2
  32. pulumi_vsphere/outputs.py +40 -48
  33. pulumi_vsphere/provider.py +6 -2
  34. pulumi_vsphere/pulumi-plugin.json +1 -1
  35. pulumi_vsphere/resource_pool.py +2 -2
  36. pulumi_vsphere/supervisor.py +30 -134
  37. pulumi_vsphere/virtual_disk.py +30 -38
  38. pulumi_vsphere/virtual_machine.py +32 -32
  39. pulumi_vsphere/virtual_machine_class.py +0 -2
  40. pulumi_vsphere/virtual_machine_snapshot.py +2 -2
  41. pulumi_vsphere/vm_storage_policy.py +67 -67
  42. pulumi_vsphere/vnic.py +93 -89
  43. {pulumi_vsphere-4.10.3a1723624830.dist-info → pulumi_vsphere-4.11.0a1.dist-info}/METADATA +1 -1
  44. pulumi_vsphere-4.11.0a1.dist-info/RECORD +86 -0
  45. {pulumi_vsphere-4.10.3a1723624830.dist-info → pulumi_vsphere-4.11.0a1.dist-info}/WHEEL +1 -1
  46. pulumi_vsphere-4.10.3a1723624830.dist-info/RECORD +0 -86
  47. {pulumi_vsphere-4.10.3a1723624830.dist-info → pulumi_vsphere-4.11.0a1.dist-info}/top_level.txt +0 -0
pulumi_vsphere/outputs.py CHANGED
@@ -762,12 +762,9 @@ class EntityPermissionsPermission(dict):
762
762
  role_id: str,
763
763
  user_or_group: str):
764
764
  """
765
- :param bool is_group: Whether `user_or_group` field refers to a user or a
766
- group. True for a group and false for a user.
767
- :param bool propagate: Whether or not this permission propagates down the
768
- hierarchy to sub-entities.
769
- :param str role_id: The role id of the role to be given to the user on
770
- the specified entity.
765
+ :param bool is_group: Whether user_or_group field refers to a user or a group. True for a group and false for a user.
766
+ :param bool propagate: Whether or not this permission propagates down the hierarchy to sub-entities.
767
+ :param str role_id: The role id of the role to be given to the user on the specified entity.
771
768
  :param str user_or_group: The user/group getting the permission.
772
769
  """
773
770
  pulumi.set(__self__, "is_group", is_group)
@@ -779,8 +776,7 @@ class EntityPermissionsPermission(dict):
779
776
  @pulumi.getter(name="isGroup")
780
777
  def is_group(self) -> bool:
781
778
  """
782
- Whether `user_or_group` field refers to a user or a
783
- group. True for a group and false for a user.
779
+ Whether user_or_group field refers to a user or a group. True for a group and false for a user.
784
780
  """
785
781
  return pulumi.get(self, "is_group")
786
782
 
@@ -788,8 +784,7 @@ class EntityPermissionsPermission(dict):
788
784
  @pulumi.getter
789
785
  def propagate(self) -> bool:
790
786
  """
791
- Whether or not this permission propagates down the
792
- hierarchy to sub-entities.
787
+ Whether or not this permission propagates down the hierarchy to sub-entities.
793
788
  """
794
789
  return pulumi.get(self, "propagate")
795
790
 
@@ -797,8 +792,7 @@ class EntityPermissionsPermission(dict):
797
792
  @pulumi.getter(name="roleId")
798
793
  def role_id(self) -> str:
799
794
  """
800
- The role id of the role to be given to the user on
801
- the specified entity.
795
+ The role id of the role to be given to the user on the specified entity.
802
796
  """
803
797
  return pulumi.get(self, "role_id")
804
798
 
@@ -3697,14 +3691,13 @@ class GetHostVgpuProfileVgpuProfileResult(dict):
3697
3691
  """
3698
3692
  :param bool disk_snapshot_supported: Indicates whether the GPU plugin on this host is
3699
3693
  capable of disk-only snapshots when VM is not powered off.
3700
- :param bool memory_snapshot_supported: Indicates whether the GPU plugin on this host
3701
- is capable of memory snapshots.
3702
- :param bool migrate_supported: Indicates whether the GPU plugin on this host is
3703
- capable of migration.
3704
- :param bool suspend_supported: Indicates whether the GPU plugin on this host is
3705
- capable of suspend-resume.
3706
- :param str vgpu: Name of a particular vGPU available as a shared GPU device (vGPU
3707
- profile).
3694
+ :param bool memory_snapshot_supported: Indicates whether the GPU plugin on this host is
3695
+ capable of memory snapshots.
3696
+ :param bool migrate_supported: Indicates whether the GPU plugin on this host is capable
3697
+ of migration.
3698
+ :param bool suspend_supported: Indicates whether the GPU plugin on this host is capable
3699
+ of suspend-resume.
3700
+ :param str vgpu: Name of a particular vGPU available as a shared GPU device (vGPU profile).
3708
3701
  """
3709
3702
  pulumi.set(__self__, "disk_snapshot_supported", disk_snapshot_supported)
3710
3703
  pulumi.set(__self__, "memory_snapshot_supported", memory_snapshot_supported)
@@ -3725,8 +3718,8 @@ class GetHostVgpuProfileVgpuProfileResult(dict):
3725
3718
  @pulumi.getter(name="memorySnapshotSupported")
3726
3719
  def memory_snapshot_supported(self) -> bool:
3727
3720
  """
3728
- Indicates whether the GPU plugin on this host
3729
- is capable of memory snapshots.
3721
+ Indicates whether the GPU plugin on this host is
3722
+ capable of memory snapshots.
3730
3723
  """
3731
3724
  return pulumi.get(self, "memory_snapshot_supported")
3732
3725
 
@@ -3734,8 +3727,8 @@ class GetHostVgpuProfileVgpuProfileResult(dict):
3734
3727
  @pulumi.getter(name="migrateSupported")
3735
3728
  def migrate_supported(self) -> bool:
3736
3729
  """
3737
- Indicates whether the GPU plugin on this host is
3738
- capable of migration.
3730
+ Indicates whether the GPU plugin on this host is capable
3731
+ of migration.
3739
3732
  """
3740
3733
  return pulumi.get(self, "migrate_supported")
3741
3734
 
@@ -3743,8 +3736,8 @@ class GetHostVgpuProfileVgpuProfileResult(dict):
3743
3736
  @pulumi.getter(name="suspendSupported")
3744
3737
  def suspend_supported(self) -> bool:
3745
3738
  """
3746
- Indicates whether the GPU plugin on this host is
3747
- capable of suspend-resume.
3739
+ Indicates whether the GPU plugin on this host is capable
3740
+ of suspend-resume.
3748
3741
  """
3749
3742
  return pulumi.get(self, "suspend_supported")
3750
3743
 
@@ -3752,8 +3745,7 @@ class GetHostVgpuProfileVgpuProfileResult(dict):
3752
3745
  @pulumi.getter
3753
3746
  def vgpu(self) -> str:
3754
3747
  """
3755
- Name of a particular vGPU available as a shared GPU device (vGPU
3756
- profile).
3748
+ Name of a particular vGPU available as a shared GPU device (vGPU profile).
3757
3749
  """
3758
3750
  return pulumi.get(self, "vgpu")
3759
3751
 
@@ -3832,21 +3824,21 @@ class GetVirtualMachineNetworkInterfaceResult(dict):
3832
3824
  bandwidth_reservation: Optional[int] = None,
3833
3825
  bandwidth_share_level: Optional[str] = None):
3834
3826
  """
3835
- :param str adapter_type: The network interface types for each network interface found
3836
- on the virtual machine, in device bus order. Will be one of `e1000`,
3837
- `e1000e`, `vmxnet3vrdma`, or `vmxnet3`.
3827
+ :param str adapter_type: The network interface types for each network interface found
3828
+ on the virtual machine, in device bus order. Will be one of `e1000`, `e1000e`,
3829
+ `vmxnet3vrdma`, or `vmxnet3`.
3838
3830
  :param int bandwidth_share_count: The share count for this network interface when the
3839
3831
  share level is custom.
3840
3832
  :param str mac_address: The MAC address of this network interface.
3841
- :param str network_id: The managed object reference ID of the network this interface
3842
- is connected to.
3833
+ :param str network_id: The managed object reference ID of the network this interface is
3834
+ connected to.
3843
3835
  :param str physical_function: The ID of the Physical SR-IOV NIC to attach to, e.g. '0000:d8:00.0'
3844
- :param int bandwidth_limit: The upper bandwidth limit of this network interface,
3836
+ :param int bandwidth_limit: The upper bandwidth limit of this network interface,
3845
3837
  in Mbits/sec.
3846
- :param int bandwidth_reservation: The bandwidth reservation of this network
3847
- interface, in Mbits/sec.
3848
- :param str bandwidth_share_level: The bandwidth share allocation level for this
3849
- interface. Can be one of `low`, `normal`, `high`, or `custom`.
3838
+ :param int bandwidth_reservation: The bandwidth reservation of this network interface,
3839
+ in Mbits/sec.
3840
+ :param str bandwidth_share_level: The bandwidth share allocation level for this interface.
3841
+ Can be one of `low`, `normal`, `high`, or `custom`.
3850
3842
  """
3851
3843
  pulumi.set(__self__, "adapter_type", adapter_type)
3852
3844
  pulumi.set(__self__, "bandwidth_share_count", bandwidth_share_count)
@@ -3864,9 +3856,9 @@ class GetVirtualMachineNetworkInterfaceResult(dict):
3864
3856
  @pulumi.getter(name="adapterType")
3865
3857
  def adapter_type(self) -> str:
3866
3858
  """
3867
- The network interface types for each network interface found
3868
- on the virtual machine, in device bus order. Will be one of `e1000`,
3869
- `e1000e`, `vmxnet3vrdma`, or `vmxnet3`.
3859
+ The network interface types for each network interface found
3860
+ on the virtual machine, in device bus order. Will be one of `e1000`, `e1000e`,
3861
+ `vmxnet3vrdma`, or `vmxnet3`.
3870
3862
  """
3871
3863
  return pulumi.get(self, "adapter_type")
3872
3864
 
@@ -3891,8 +3883,8 @@ class GetVirtualMachineNetworkInterfaceResult(dict):
3891
3883
  @pulumi.getter(name="networkId")
3892
3884
  def network_id(self) -> str:
3893
3885
  """
3894
- The managed object reference ID of the network this interface
3895
- is connected to.
3886
+ The managed object reference ID of the network this interface is
3887
+ connected to.
3896
3888
  """
3897
3889
  return pulumi.get(self, "network_id")
3898
3890
 
@@ -3908,7 +3900,7 @@ class GetVirtualMachineNetworkInterfaceResult(dict):
3908
3900
  @pulumi.getter(name="bandwidthLimit")
3909
3901
  def bandwidth_limit(self) -> Optional[int]:
3910
3902
  """
3911
- The upper bandwidth limit of this network interface,
3903
+ The upper bandwidth limit of this network interface,
3912
3904
  in Mbits/sec.
3913
3905
  """
3914
3906
  return pulumi.get(self, "bandwidth_limit")
@@ -3917,8 +3909,8 @@ class GetVirtualMachineNetworkInterfaceResult(dict):
3917
3909
  @pulumi.getter(name="bandwidthReservation")
3918
3910
  def bandwidth_reservation(self) -> Optional[int]:
3919
3911
  """
3920
- The bandwidth reservation of this network
3921
- interface, in Mbits/sec.
3912
+ The bandwidth reservation of this network interface,
3913
+ in Mbits/sec.
3922
3914
  """
3923
3915
  return pulumi.get(self, "bandwidth_reservation")
3924
3916
 
@@ -3926,8 +3918,8 @@ class GetVirtualMachineNetworkInterfaceResult(dict):
3926
3918
  @pulumi.getter(name="bandwidthShareLevel")
3927
3919
  def bandwidth_share_level(self) -> Optional[str]:
3928
3920
  """
3929
- The bandwidth share allocation level for this
3930
- interface. Can be one of `low`, `normal`, `high`, or `custom`.
3921
+ The bandwidth share allocation level for this interface.
3922
+ Can be one of `low`, `normal`, `high`, or `custom`.
3931
3923
  """
3932
3924
  return pulumi.get(self, "bandwidth_share_level")
3933
3925
 
@@ -196,8 +196,10 @@ class ProviderArgs:
196
196
 
197
197
  @property
198
198
  @pulumi.getter(name="vcenterServer")
199
- @_utilities.deprecated("""This field has been renamed to vsphere_server.""")
200
199
  def vcenter_server(self) -> Optional[pulumi.Input[str]]:
200
+ warnings.warn("""This field has been renamed to vsphere_server.""", DeprecationWarning)
201
+ pulumi.log.warn("""vcenter_server is deprecated: This field has been renamed to vsphere_server.""")
202
+
201
203
  return pulumi.get(self, "vcenter_server")
202
204
 
203
205
  @vcenter_server.setter
@@ -411,8 +413,10 @@ class Provider(pulumi.ProviderResource):
411
413
 
412
414
  @property
413
415
  @pulumi.getter(name="vcenterServer")
414
- @_utilities.deprecated("""This field has been renamed to vsphere_server.""")
415
416
  def vcenter_server(self) -> pulumi.Output[Optional[str]]:
417
+ warnings.warn("""This field has been renamed to vsphere_server.""", DeprecationWarning)
418
+ pulumi.log.warn("""vcenter_server is deprecated: This field has been renamed to vsphere_server.""")
419
+
416
420
  return pulumi.get(self, "vcenter_server")
417
421
 
418
422
  @property
@@ -1,5 +1,5 @@
1
1
  {
2
2
  "resource": true,
3
3
  "name": "vsphere",
4
- "version": "4.10.3-alpha.1723624830"
4
+ "version": "4.11.0-alpha.1"
5
5
  }
@@ -650,7 +650,7 @@ class ResourcePool(pulumi.CustomResource):
650
650
  For more information on vSphere resource pools, please refer to the
651
651
  [product documentation][ref-vsphere-resource_pools].
652
652
 
653
- [ref-vsphere-resource_pools]: https://docs.vmware.com/en/VMware-vSphere/8.0/vsphere-resource-management/GUID-60077B40-66FF-4625-934A-641703ED7601.html
653
+ [ref-vsphere-resource_pools]: https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.resmgmt.doc/GUID-60077B40-66FF-4625-934A-641703ED7601.html
654
654
 
655
655
  ## Example Usage
656
656
 
@@ -772,7 +772,7 @@ class ResourcePool(pulumi.CustomResource):
772
772
  For more information on vSphere resource pools, please refer to the
773
773
  [product documentation][ref-vsphere-resource_pools].
774
774
 
775
- [ref-vsphere-resource_pools]: https://docs.vmware.com/en/VMware-vSphere/8.0/vsphere-resource-management/GUID-60077B40-66FF-4625-934A-641703ED7601.html
775
+ [ref-vsphere-resource_pools]: https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.resmgmt.doc/GUID-60077B40-66FF-4625-934A-641703ED7601.html
776
776
 
777
777
  ## Example Usage
778
778
 
@@ -524,14 +524,14 @@ class Supervisor(pulumi.CustomResource):
524
524
  content_library: Optional[pulumi.Input[str]] = None,
525
525
  dvs_uuid: Optional[pulumi.Input[str]] = None,
526
526
  edge_cluster: Optional[pulumi.Input[str]] = None,
527
- egress_cidrs: Optional[pulumi.Input[Sequence[pulumi.Input[Union['SupervisorEgressCidrArgs', 'SupervisorEgressCidrArgsDict']]]]] = None,
528
- ingress_cidrs: Optional[pulumi.Input[Sequence[pulumi.Input[Union['SupervisorIngressCidrArgs', 'SupervisorIngressCidrArgsDict']]]]] = None,
527
+ egress_cidrs: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SupervisorEgressCidrArgs']]]]] = None,
528
+ ingress_cidrs: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SupervisorIngressCidrArgs']]]]] = None,
529
529
  main_dns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
530
- management_network: Optional[pulumi.Input[Union['SupervisorManagementNetworkArgs', 'SupervisorManagementNetworkArgsDict']]] = None,
531
- namespaces: Optional[pulumi.Input[Sequence[pulumi.Input[Union['SupervisorNamespaceArgs', 'SupervisorNamespaceArgsDict']]]]] = None,
532
- pod_cidrs: Optional[pulumi.Input[Sequence[pulumi.Input[Union['SupervisorPodCidrArgs', 'SupervisorPodCidrArgsDict']]]]] = None,
530
+ management_network: Optional[pulumi.Input[pulumi.InputType['SupervisorManagementNetworkArgs']]] = None,
531
+ namespaces: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SupervisorNamespaceArgs']]]]] = None,
532
+ pod_cidrs: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SupervisorPodCidrArgs']]]]] = None,
533
533
  search_domains: Optional[pulumi.Input[str]] = None,
534
- service_cidr: Optional[pulumi.Input[Union['SupervisorServiceCidrArgs', 'SupervisorServiceCidrArgsDict']]] = None,
534
+ service_cidr: Optional[pulumi.Input[pulumi.InputType['SupervisorServiceCidrArgs']]] = None,
535
535
  sizing_hint: Optional[pulumi.Input[str]] = None,
536
536
  storage_policy: Optional[pulumi.Input[str]] = None,
537
537
  worker_dns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
@@ -541,77 +541,25 @@ class Supervisor(pulumi.CustomResource):
541
541
 
542
542
  ## Example Usage
543
543
 
544
- ### S
545
-
546
- **Enable Workload Management on a compute cluster**
547
-
548
- ```python
549
- import pulumi
550
- import pulumi_vsphere as vsphere
551
-
552
- vm_class = vsphere.VirtualMachineClass("vm_class",
553
- name="custom-class",
554
- cpus=4,
555
- memory=4096)
556
- supervisor = vsphere.Supervisor("supervisor",
557
- cluster="<compute_cluster_id>",
558
- storage_policy="<storage_policy_name>",
559
- content_library="<content_library_id>",
560
- main_dns="10.0.0.250",
561
- worker_dns="10.0.0.250",
562
- edge_cluster="<edge_cluster_id>",
563
- dvs_uuid="<distributed_switch_uuid>",
564
- sizing_hint="MEDIUM",
565
- management_network={
566
- "network": "<portgroup_id>",
567
- "subnet_mask": "255.255.255.0",
568
- "starting_address": "10.0.0.150",
569
- "gateway": "10.0.0.250",
570
- "address_count": 5,
571
- },
572
- ingress_cidrs=[{
573
- "address": "10.10.10.0",
574
- "prefix": 24,
575
- }],
576
- egress_cidrs=[{
577
- "address": "10.10.11.0",
578
- "prefix": 24,
579
- }],
580
- pod_cidrs=[{
581
- "address": "10.244.10.0",
582
- "prefix": 23,
583
- }],
584
- service_cidr={
585
- "address": "10.10.12.0",
586
- "prefix": 24,
587
- },
588
- search_domains="vsphere.local",
589
- namespaces=[{
590
- "name": "custom-namespace",
591
- "content_libraries": [],
592
- "vm_classes": [vm_class.id],
593
- }])
594
- ```
595
-
596
544
  :param str resource_name: The name of the resource.
597
545
  :param pulumi.ResourceOptions opts: Options for the resource.
598
546
  :param pulumi.Input[str] cluster: The identifier of the compute cluster.
599
547
  :param pulumi.Input[str] content_library: The identifier of the subscribed content library.
600
548
  :param pulumi.Input[str] dvs_uuid: The UUID of the distributed switch.
601
549
  :param pulumi.Input[str] edge_cluster: The identifier of the NSX Edge Cluster.
602
- :param pulumi.Input[Sequence[pulumi.Input[Union['SupervisorEgressCidrArgs', 'SupervisorEgressCidrArgsDict']]]] egress_cidrs: CIDR blocks from which NSX assigns IP addresses used for performing SNAT from container IPs to external IPs.
603
- :param pulumi.Input[Sequence[pulumi.Input[Union['SupervisorIngressCidrArgs', 'SupervisorIngressCidrArgsDict']]]] ingress_cidrs: CIDR blocks from which NSX assigns IP addresses for Kubernetes Ingresses and Kubernetes Services of type LoadBalancer.
550
+ :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SupervisorEgressCidrArgs']]]] egress_cidrs: CIDR blocks from which NSX assigns IP addresses used for performing SNAT from container IPs to external IPs.
551
+ :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SupervisorIngressCidrArgs']]]] ingress_cidrs: CIDR blocks from which NSX assigns IP addresses for Kubernetes Ingresses and Kubernetes Services of type LoadBalancer.
604
552
  :param pulumi.Input[Sequence[pulumi.Input[str]]] main_dns: The list of addresses of the primary DNS servers.
605
- :param pulumi.Input[Union['SupervisorManagementNetworkArgs', 'SupervisorManagementNetworkArgsDict']] management_network: The configuration for the management network which the control plane VMs will be connected to.
553
+ :param pulumi.Input[pulumi.InputType['SupervisorManagementNetworkArgs']] management_network: The configuration for the management network which the control plane VMs will be connected to.
606
554
  * * `network` - ID of the network. (e.g. a distributed port group).
607
555
  * * `starting_address` - Starting address of the management network range.
608
556
  * * `subnet_mask` - Subnet mask.
609
557
  * * `gateway` - Gateway IP address.
610
558
  * * `address_count` - Number of addresses to allocate. Starts from `starting_address`
611
- :param pulumi.Input[Sequence[pulumi.Input[Union['SupervisorNamespaceArgs', 'SupervisorNamespaceArgsDict']]]] namespaces: The list of namespaces to create in the Supervisor cluster
612
- :param pulumi.Input[Sequence[pulumi.Input[Union['SupervisorPodCidrArgs', 'SupervisorPodCidrArgsDict']]]] pod_cidrs: CIDR blocks from which Kubernetes allocates pod IP addresses. Minimum subnet size is 23.
559
+ :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SupervisorNamespaceArgs']]]] namespaces: The list of namespaces to create in the Supervisor cluster
560
+ :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SupervisorPodCidrArgs']]]] pod_cidrs: CIDR blocks from which Kubernetes allocates pod IP addresses. Minimum subnet size is 23.
613
561
  :param pulumi.Input[str] search_domains: List of DNS search domains.
614
- :param pulumi.Input[Union['SupervisorServiceCidrArgs', 'SupervisorServiceCidrArgsDict']] service_cidr: CIDR block from which Kubernetes allocates service cluster IP addresses.
562
+ :param pulumi.Input[pulumi.InputType['SupervisorServiceCidrArgs']] service_cidr: CIDR block from which Kubernetes allocates service cluster IP addresses.
615
563
  :param pulumi.Input[str] sizing_hint: The size of the Kubernetes API server.
616
564
  :param pulumi.Input[str] storage_policy: The name of the storage policy.
617
565
  :param pulumi.Input[Sequence[pulumi.Input[str]]] worker_dns: The list of addresses of the DNS servers to use for the worker nodes.
@@ -627,58 +575,6 @@ class Supervisor(pulumi.CustomResource):
627
575
 
628
576
  ## Example Usage
629
577
 
630
- ### S
631
-
632
- **Enable Workload Management on a compute cluster**
633
-
634
- ```python
635
- import pulumi
636
- import pulumi_vsphere as vsphere
637
-
638
- vm_class = vsphere.VirtualMachineClass("vm_class",
639
- name="custom-class",
640
- cpus=4,
641
- memory=4096)
642
- supervisor = vsphere.Supervisor("supervisor",
643
- cluster="<compute_cluster_id>",
644
- storage_policy="<storage_policy_name>",
645
- content_library="<content_library_id>",
646
- main_dns="10.0.0.250",
647
- worker_dns="10.0.0.250",
648
- edge_cluster="<edge_cluster_id>",
649
- dvs_uuid="<distributed_switch_uuid>",
650
- sizing_hint="MEDIUM",
651
- management_network={
652
- "network": "<portgroup_id>",
653
- "subnet_mask": "255.255.255.0",
654
- "starting_address": "10.0.0.150",
655
- "gateway": "10.0.0.250",
656
- "address_count": 5,
657
- },
658
- ingress_cidrs=[{
659
- "address": "10.10.10.0",
660
- "prefix": 24,
661
- }],
662
- egress_cidrs=[{
663
- "address": "10.10.11.0",
664
- "prefix": 24,
665
- }],
666
- pod_cidrs=[{
667
- "address": "10.244.10.0",
668
- "prefix": 23,
669
- }],
670
- service_cidr={
671
- "address": "10.10.12.0",
672
- "prefix": 24,
673
- },
674
- search_domains="vsphere.local",
675
- namespaces=[{
676
- "name": "custom-namespace",
677
- "content_libraries": [],
678
- "vm_classes": [vm_class.id],
679
- }])
680
- ```
681
-
682
578
  :param str resource_name: The name of the resource.
683
579
  :param SupervisorArgs args: The arguments to use to populate this resource's properties.
684
580
  :param pulumi.ResourceOptions opts: Options for the resource.
@@ -698,14 +594,14 @@ class Supervisor(pulumi.CustomResource):
698
594
  content_library: Optional[pulumi.Input[str]] = None,
699
595
  dvs_uuid: Optional[pulumi.Input[str]] = None,
700
596
  edge_cluster: Optional[pulumi.Input[str]] = None,
701
- egress_cidrs: Optional[pulumi.Input[Sequence[pulumi.Input[Union['SupervisorEgressCidrArgs', 'SupervisorEgressCidrArgsDict']]]]] = None,
702
- ingress_cidrs: Optional[pulumi.Input[Sequence[pulumi.Input[Union['SupervisorIngressCidrArgs', 'SupervisorIngressCidrArgsDict']]]]] = None,
597
+ egress_cidrs: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SupervisorEgressCidrArgs']]]]] = None,
598
+ ingress_cidrs: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SupervisorIngressCidrArgs']]]]] = None,
703
599
  main_dns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
704
- management_network: Optional[pulumi.Input[Union['SupervisorManagementNetworkArgs', 'SupervisorManagementNetworkArgsDict']]] = None,
705
- namespaces: Optional[pulumi.Input[Sequence[pulumi.Input[Union['SupervisorNamespaceArgs', 'SupervisorNamespaceArgsDict']]]]] = None,
706
- pod_cidrs: Optional[pulumi.Input[Sequence[pulumi.Input[Union['SupervisorPodCidrArgs', 'SupervisorPodCidrArgsDict']]]]] = None,
600
+ management_network: Optional[pulumi.Input[pulumi.InputType['SupervisorManagementNetworkArgs']]] = None,
601
+ namespaces: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SupervisorNamespaceArgs']]]]] = None,
602
+ pod_cidrs: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SupervisorPodCidrArgs']]]]] = None,
707
603
  search_domains: Optional[pulumi.Input[str]] = None,
708
- service_cidr: Optional[pulumi.Input[Union['SupervisorServiceCidrArgs', 'SupervisorServiceCidrArgsDict']]] = None,
604
+ service_cidr: Optional[pulumi.Input[pulumi.InputType['SupervisorServiceCidrArgs']]] = None,
709
605
  sizing_hint: Optional[pulumi.Input[str]] = None,
710
606
  storage_policy: Optional[pulumi.Input[str]] = None,
711
607
  worker_dns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
@@ -775,14 +671,14 @@ class Supervisor(pulumi.CustomResource):
775
671
  content_library: Optional[pulumi.Input[str]] = None,
776
672
  dvs_uuid: Optional[pulumi.Input[str]] = None,
777
673
  edge_cluster: Optional[pulumi.Input[str]] = None,
778
- egress_cidrs: Optional[pulumi.Input[Sequence[pulumi.Input[Union['SupervisorEgressCidrArgs', 'SupervisorEgressCidrArgsDict']]]]] = None,
779
- ingress_cidrs: Optional[pulumi.Input[Sequence[pulumi.Input[Union['SupervisorIngressCidrArgs', 'SupervisorIngressCidrArgsDict']]]]] = None,
674
+ egress_cidrs: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SupervisorEgressCidrArgs']]]]] = None,
675
+ ingress_cidrs: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SupervisorIngressCidrArgs']]]]] = None,
780
676
  main_dns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
781
- management_network: Optional[pulumi.Input[Union['SupervisorManagementNetworkArgs', 'SupervisorManagementNetworkArgsDict']]] = None,
782
- namespaces: Optional[pulumi.Input[Sequence[pulumi.Input[Union['SupervisorNamespaceArgs', 'SupervisorNamespaceArgsDict']]]]] = None,
783
- pod_cidrs: Optional[pulumi.Input[Sequence[pulumi.Input[Union['SupervisorPodCidrArgs', 'SupervisorPodCidrArgsDict']]]]] = None,
677
+ management_network: Optional[pulumi.Input[pulumi.InputType['SupervisorManagementNetworkArgs']]] = None,
678
+ namespaces: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SupervisorNamespaceArgs']]]]] = None,
679
+ pod_cidrs: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SupervisorPodCidrArgs']]]]] = None,
784
680
  search_domains: Optional[pulumi.Input[str]] = None,
785
- service_cidr: Optional[pulumi.Input[Union['SupervisorServiceCidrArgs', 'SupervisorServiceCidrArgsDict']]] = None,
681
+ service_cidr: Optional[pulumi.Input[pulumi.InputType['SupervisorServiceCidrArgs']]] = None,
786
682
  sizing_hint: Optional[pulumi.Input[str]] = None,
787
683
  storage_policy: Optional[pulumi.Input[str]] = None,
788
684
  worker_dns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'Supervisor':
@@ -797,19 +693,19 @@ class Supervisor(pulumi.CustomResource):
797
693
  :param pulumi.Input[str] content_library: The identifier of the subscribed content library.
798
694
  :param pulumi.Input[str] dvs_uuid: The UUID of the distributed switch.
799
695
  :param pulumi.Input[str] edge_cluster: The identifier of the NSX Edge Cluster.
800
- :param pulumi.Input[Sequence[pulumi.Input[Union['SupervisorEgressCidrArgs', 'SupervisorEgressCidrArgsDict']]]] egress_cidrs: CIDR blocks from which NSX assigns IP addresses used for performing SNAT from container IPs to external IPs.
801
- :param pulumi.Input[Sequence[pulumi.Input[Union['SupervisorIngressCidrArgs', 'SupervisorIngressCidrArgsDict']]]] ingress_cidrs: CIDR blocks from which NSX assigns IP addresses for Kubernetes Ingresses and Kubernetes Services of type LoadBalancer.
696
+ :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SupervisorEgressCidrArgs']]]] egress_cidrs: CIDR blocks from which NSX assigns IP addresses used for performing SNAT from container IPs to external IPs.
697
+ :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SupervisorIngressCidrArgs']]]] ingress_cidrs: CIDR blocks from which NSX assigns IP addresses for Kubernetes Ingresses and Kubernetes Services of type LoadBalancer.
802
698
  :param pulumi.Input[Sequence[pulumi.Input[str]]] main_dns: The list of addresses of the primary DNS servers.
803
- :param pulumi.Input[Union['SupervisorManagementNetworkArgs', 'SupervisorManagementNetworkArgsDict']] management_network: The configuration for the management network which the control plane VMs will be connected to.
699
+ :param pulumi.Input[pulumi.InputType['SupervisorManagementNetworkArgs']] management_network: The configuration for the management network which the control plane VMs will be connected to.
804
700
  * * `network` - ID of the network. (e.g. a distributed port group).
805
701
  * * `starting_address` - Starting address of the management network range.
806
702
  * * `subnet_mask` - Subnet mask.
807
703
  * * `gateway` - Gateway IP address.
808
704
  * * `address_count` - Number of addresses to allocate. Starts from `starting_address`
809
- :param pulumi.Input[Sequence[pulumi.Input[Union['SupervisorNamespaceArgs', 'SupervisorNamespaceArgsDict']]]] namespaces: The list of namespaces to create in the Supervisor cluster
810
- :param pulumi.Input[Sequence[pulumi.Input[Union['SupervisorPodCidrArgs', 'SupervisorPodCidrArgsDict']]]] pod_cidrs: CIDR blocks from which Kubernetes allocates pod IP addresses. Minimum subnet size is 23.
705
+ :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SupervisorNamespaceArgs']]]] namespaces: The list of namespaces to create in the Supervisor cluster
706
+ :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SupervisorPodCidrArgs']]]] pod_cidrs: CIDR blocks from which Kubernetes allocates pod IP addresses. Minimum subnet size is 23.
811
707
  :param pulumi.Input[str] search_domains: List of DNS search domains.
812
- :param pulumi.Input[Union['SupervisorServiceCidrArgs', 'SupervisorServiceCidrArgsDict']] service_cidr: CIDR block from which Kubernetes allocates service cluster IP addresses.
708
+ :param pulumi.Input[pulumi.InputType['SupervisorServiceCidrArgs']] service_cidr: CIDR block from which Kubernetes allocates service cluster IP addresses.
813
709
  :param pulumi.Input[str] sizing_hint: The size of the Kubernetes API server.
814
710
  :param pulumi.Input[str] storage_policy: The name of the storage policy.
815
711
  :param pulumi.Input[Sequence[pulumi.Input[str]]] worker_dns: The list of addresses of the DNS servers to use for the worker nodes.