pulumi-vsphere 4.12.0a1727221820__py3-none-any.whl → 4.12.0a1727848995__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of pulumi-vsphere might be problematic. Click here for more details.

Files changed (40) hide show
  1. pulumi_vsphere/compute_cluster.py +176 -2
  2. pulumi_vsphere/compute_cluster_host_group.py +62 -2
  3. pulumi_vsphere/compute_cluster_vm_affinity_rule.py +24 -2
  4. pulumi_vsphere/compute_cluster_vm_anti_affinity_rule.py +36 -2
  5. pulumi_vsphere/compute_cluster_vm_dependency_rule.py +200 -2
  6. pulumi_vsphere/compute_cluster_vm_group.py +158 -2
  7. pulumi_vsphere/compute_cluster_vm_host_rule.py +68 -2
  8. pulumi_vsphere/content_library.py +98 -2
  9. pulumi_vsphere/content_library_item.py +138 -2
  10. pulumi_vsphere/custom_attribute.py +72 -2
  11. pulumi_vsphere/datacenter.py +14 -4
  12. pulumi_vsphere/datastore_cluster.py +58 -2
  13. pulumi_vsphere/datastore_cluster_vm_anti_affinity_rule.py +154 -2
  14. pulumi_vsphere/distributed_port_group.py +280 -2
  15. pulumi_vsphere/distributed_virtual_switch.py +256 -2
  16. pulumi_vsphere/dpm_host_override.py +58 -2
  17. pulumi_vsphere/drs_vm_override.py +62 -2
  18. pulumi_vsphere/folder.py +136 -2
  19. pulumi_vsphere/get_compute_cluster_host_group.py +2 -2
  20. pulumi_vsphere/ha_vm_override.py +158 -2
  21. pulumi_vsphere/host.py +250 -2
  22. pulumi_vsphere/host_port_group.py +12 -2
  23. pulumi_vsphere/host_virtual_switch.py +64 -2
  24. pulumi_vsphere/nas_datastore.py +62 -2
  25. pulumi_vsphere/pulumi-plugin.json +1 -1
  26. pulumi_vsphere/resource_pool.py +4 -16
  27. pulumi_vsphere/role.py +28 -2
  28. pulumi_vsphere/storage_drs_vm_override.py +128 -2
  29. pulumi_vsphere/tag.py +154 -2
  30. pulumi_vsphere/tag_category.py +78 -2
  31. pulumi_vsphere/vapp_container.py +158 -2
  32. pulumi_vsphere/vapp_entity.py +142 -2
  33. pulumi_vsphere/virtual_disk.py +76 -2
  34. pulumi_vsphere/virtual_machine.py +48 -2
  35. pulumi_vsphere/vmfs_datastore.py +266 -2
  36. pulumi_vsphere/vnic.py +14 -4
  37. {pulumi_vsphere-4.12.0a1727221820.dist-info → pulumi_vsphere-4.12.0a1727848995.dist-info}/METADATA +1 -1
  38. {pulumi_vsphere-4.12.0a1727221820.dist-info → pulumi_vsphere-4.12.0a1727848995.dist-info}/RECORD +40 -40
  39. {pulumi_vsphere-4.12.0a1727221820.dist-info → pulumi_vsphere-4.12.0a1727848995.dist-info}/WHEEL +0 -0
  40. {pulumi_vsphere-4.12.0a1727221820.dist-info → pulumi_vsphere-4.12.0a1727848995.dist-info}/top_level.txt +0 -0
@@ -946,7 +946,35 @@ class DatastoreCluster(pulumi.CustomResource):
946
946
  tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
947
947
  __props__=None):
948
948
  """
949
- Create a DatastoreCluster resource with the given unique name, props, and options.
949
+ The `DatastoreCluster` resource can be used to create and manage
950
+ datastore clusters. This can be used to create groups of datastores with a
951
+ shared management interface, allowing for resource control and load balancing
952
+ through Storage DRS.
953
+
954
+ For more information on vSphere datastore clusters and Storage DRS, see [this
955
+ page][ref-vsphere-datastore-clusters].
956
+
957
+ [ref-vsphere-datastore-clusters]: https://docs.vmware.com/en/VMware-vSphere/8.0/vsphere-resource-management/GUID-598DF695-107E-406B-9C95-0AF961FC227A.html
958
+
959
+ > **NOTE:** This resource requires vCenter and is not available on direct ESXi
960
+ connections.
961
+
962
+ > **NOTE:** Storage DRS requires a vSphere Enterprise Plus license.
963
+
964
+ ## Import
965
+
966
+ An existing datastore cluster can be imported into this resource
967
+
968
+ using the path to the cluster, via the following command:
969
+
970
+ ```sh
971
+ $ pulumi import vsphere:index/datastoreCluster:DatastoreCluster datastore_cluster /dc1/datastore/ds-cluster
972
+ ```
973
+
974
+ The above would import the datastore cluster named `ds-cluster` that is located
975
+
976
+ in the `dc1` datacenter.
977
+
950
978
  :param str resource_name: The name of the resource.
951
979
  :param pulumi.ResourceOptions opts: Options for the resource.
952
980
  :param pulumi.Input[Mapping[str, pulumi.Input[str]]] custom_attributes: A map of custom attribute ids to attribute
@@ -1004,7 +1032,35 @@ class DatastoreCluster(pulumi.CustomResource):
1004
1032
  args: DatastoreClusterArgs,
1005
1033
  opts: Optional[pulumi.ResourceOptions] = None):
1006
1034
  """
1007
- Create a DatastoreCluster resource with the given unique name, props, and options.
1035
+ The `DatastoreCluster` resource can be used to create and manage
1036
+ datastore clusters. This can be used to create groups of datastores with a
1037
+ shared management interface, allowing for resource control and load balancing
1038
+ through Storage DRS.
1039
+
1040
+ For more information on vSphere datastore clusters and Storage DRS, see [this
1041
+ page][ref-vsphere-datastore-clusters].
1042
+
1043
+ [ref-vsphere-datastore-clusters]: https://docs.vmware.com/en/VMware-vSphere/8.0/vsphere-resource-management/GUID-598DF695-107E-406B-9C95-0AF961FC227A.html
1044
+
1045
+ > **NOTE:** This resource requires vCenter and is not available on direct ESXi
1046
+ connections.
1047
+
1048
+ > **NOTE:** Storage DRS requires a vSphere Enterprise Plus license.
1049
+
1050
+ ## Import
1051
+
1052
+ An existing datastore cluster can be imported into this resource
1053
+
1054
+ using the path to the cluster, via the following command:
1055
+
1056
+ ```sh
1057
+ $ pulumi import vsphere:index/datastoreCluster:DatastoreCluster datastore_cluster /dc1/datastore/ds-cluster
1058
+ ```
1059
+
1060
+ The above would import the datastore cluster named `ds-cluster` that is located
1061
+
1062
+ in the `dc1` datacenter.
1063
+
1008
1064
  :param str resource_name: The name of the resource.
1009
1065
  :param DatastoreClusterArgs args: The arguments to use to populate this resource's properties.
1010
1066
  :param pulumi.ResourceOptions opts: Options for the resource.
@@ -221,7 +221,83 @@ class DatastoreClusterVmAntiAffinityRule(pulumi.CustomResource):
221
221
  virtual_machine_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
222
222
  __props__=None):
223
223
  """
224
- Create a DatastoreClusterVmAntiAffinityRule resource with the given unique name, props, and options.
224
+ The `DatastoreClusterVmAntiAffinityRule` resource can be used to
225
+ manage VM anti-affinity rules in a datastore cluster, either created by the
226
+ `DatastoreCluster` resource or looked up
227
+ by the `DatastoreCluster` data source.
228
+
229
+ This rule can be used to tell a set of virtual machines to run on different
230
+ datastores within a cluster, useful for preventing single points of failure in
231
+ application cluster scenarios. When configured, Storage DRS will make a best effort to
232
+ ensure that the virtual machines run on different datastores, or prevent any
233
+ operation that would keep that from happening, depending on the value of the
234
+ `mandatory` flag.
235
+
236
+ > **NOTE:** This resource requires vCenter and is not available on direct ESXi
237
+ connections.
238
+
239
+ > **NOTE:** Storage DRS requires a vSphere Enterprise Plus license.
240
+
241
+ ## Example Usage
242
+
243
+ The example below creates two virtual machines in a cluster using the
244
+ `VirtualMachine` resource, creating the
245
+ virtual machines in the datastore cluster looked up by the
246
+ `DatastoreCluster` data
247
+ source. It then creates an anti-affinity rule for these two virtual machines,
248
+ ensuring they will run on different datastores whenever possible.
249
+
250
+ ```python
251
+ import pulumi
252
+ import pulumi_vsphere as vsphere
253
+
254
+ datacenter = vsphere.get_datacenter(name="dc-01")
255
+ datastore_cluster = vsphere.get_datastore_cluster(name="datastore-cluster1",
256
+ datacenter_id=datacenter.id)
257
+ cluster = vsphere.get_compute_cluster(name="cluster-01",
258
+ datacenter_id=datacenter.id)
259
+ network = vsphere.get_network(name="network1",
260
+ datacenter_id=datacenter.id)
261
+ vm = []
262
+ for range in [{"value": i} for i in range(0, 2)]:
263
+ vm.append(vsphere.VirtualMachine(f"vm-{range['value']}",
264
+ name=f"test-{range['value']}",
265
+ resource_pool_id=cluster.resource_pool_id,
266
+ datastore_cluster_id=datastore_cluster.id,
267
+ num_cpus=2,
268
+ memory=2048,
269
+ guest_id="otherLinux64Guest",
270
+ network_interfaces=[{
271
+ "network_id": network.id,
272
+ }],
273
+ disks=[{
274
+ "label": "disk0",
275
+ "size": 20,
276
+ }]))
277
+ cluster_vm_anti_affinity_rule = vsphere.DatastoreClusterVmAntiAffinityRule("cluster_vm_anti_affinity_rule",
278
+ name="test-datastore-cluster-vm-anti-affinity-rule",
279
+ datastore_cluster_id=datastore_cluster.id,
280
+ virtual_machine_ids=[[__item.id for __item in vm]])
281
+ ```
282
+
283
+ ## Import
284
+
285
+ An existing rule can be imported into this resource by supplying
286
+
287
+ both the path to the cluster, and the name of the rule. If the name or cluster is
288
+
289
+ not found, or if the rule is of a different type, an error will be returned. An
290
+
291
+ example is below:
292
+
293
+ ```sh
294
+ $ pulumi import vsphere:index/datastoreClusterVmAntiAffinityRule:DatastoreClusterVmAntiAffinityRule cluster_vm_anti_affinity_rule \\
295
+ ```
296
+
297
+ '{"compute_cluster_path": "/dc1/datastore/cluster1", \\
298
+
299
+ "name": "pulumi-test-datastore-cluster-vm-anti-affinity-rule"}'
300
+
225
301
  :param str resource_name: The name of the resource.
226
302
  :param pulumi.ResourceOptions opts: Options for the resource.
227
303
  :param pulumi.Input[str] datastore_cluster_id: The managed object reference
@@ -243,7 +319,83 @@ class DatastoreClusterVmAntiAffinityRule(pulumi.CustomResource):
243
319
  args: DatastoreClusterVmAntiAffinityRuleArgs,
244
320
  opts: Optional[pulumi.ResourceOptions] = None):
245
321
  """
246
- Create a DatastoreClusterVmAntiAffinityRule resource with the given unique name, props, and options.
322
+ The `DatastoreClusterVmAntiAffinityRule` resource can be used to
323
+ manage VM anti-affinity rules in a datastore cluster, either created by the
324
+ `DatastoreCluster` resource or looked up
325
+ by the `DatastoreCluster` data source.
326
+
327
+ This rule can be used to tell a set of virtual machines to run on different
328
+ datastores within a cluster, useful for preventing single points of failure in
329
+ application cluster scenarios. When configured, Storage DRS will make a best effort to
330
+ ensure that the virtual machines run on different datastores, or prevent any
331
+ operation that would keep that from happening, depending on the value of the
332
+ `mandatory` flag.
333
+
334
+ > **NOTE:** This resource requires vCenter and is not available on direct ESXi
335
+ connections.
336
+
337
+ > **NOTE:** Storage DRS requires a vSphere Enterprise Plus license.
338
+
339
+ ## Example Usage
340
+
341
+ The example below creates two virtual machines in a cluster using the
342
+ `VirtualMachine` resource, creating the
343
+ virtual machines in the datastore cluster looked up by the
344
+ `DatastoreCluster` data
345
+ source. It then creates an anti-affinity rule for these two virtual machines,
346
+ ensuring they will run on different datastores whenever possible.
347
+
348
+ ```python
349
+ import pulumi
350
+ import pulumi_vsphere as vsphere
351
+
352
+ datacenter = vsphere.get_datacenter(name="dc-01")
353
+ datastore_cluster = vsphere.get_datastore_cluster(name="datastore-cluster1",
354
+ datacenter_id=datacenter.id)
355
+ cluster = vsphere.get_compute_cluster(name="cluster-01",
356
+ datacenter_id=datacenter.id)
357
+ network = vsphere.get_network(name="network1",
358
+ datacenter_id=datacenter.id)
359
+ vm = []
360
+ for range in [{"value": i} for i in range(0, 2)]:
361
+ vm.append(vsphere.VirtualMachine(f"vm-{range['value']}",
362
+ name=f"test-{range['value']}",
363
+ resource_pool_id=cluster.resource_pool_id,
364
+ datastore_cluster_id=datastore_cluster.id,
365
+ num_cpus=2,
366
+ memory=2048,
367
+ guest_id="otherLinux64Guest",
368
+ network_interfaces=[{
369
+ "network_id": network.id,
370
+ }],
371
+ disks=[{
372
+ "label": "disk0",
373
+ "size": 20,
374
+ }]))
375
+ cluster_vm_anti_affinity_rule = vsphere.DatastoreClusterVmAntiAffinityRule("cluster_vm_anti_affinity_rule",
376
+ name="test-datastore-cluster-vm-anti-affinity-rule",
377
+ datastore_cluster_id=datastore_cluster.id,
378
+ virtual_machine_ids=[[__item.id for __item in vm]])
379
+ ```
380
+
381
+ ## Import
382
+
383
+ An existing rule can be imported into this resource by supplying
384
+
385
+ both the path to the cluster, and the name of the rule. If the name or cluster is
386
+
387
+ not found, or if the rule is of a different type, an error will be returned. An
388
+
389
+ example is below:
390
+
391
+ ```sh
392
+ $ pulumi import vsphere:index/datastoreClusterVmAntiAffinityRule:DatastoreClusterVmAntiAffinityRule cluster_vm_anti_affinity_rule \\
393
+ ```
394
+
395
+ '{"compute_cluster_path": "/dc1/datastore/cluster1", \\
396
+
397
+ "name": "pulumi-test-datastore-cluster-vm-anti-affinity-rule"}'
398
+
247
399
  :param str resource_name: The name of the resource.
248
400
  :param DatastoreClusterVmAntiAffinityRuleArgs args: The arguments to use to populate this resource's properties.
249
401
  :param pulumi.ResourceOptions opts: Options for the resource.
@@ -1699,7 +1699,146 @@ class DistributedPortGroup(pulumi.CustomResource):
1699
1699
  vlan_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[Union['DistributedPortGroupVlanRangeArgs', 'DistributedPortGroupVlanRangeArgsDict']]]]] = None,
1700
1700
  __props__=None):
1701
1701
  """
1702
- Create a DistributedPortGroup resource with the given unique name, props, and options.
1702
+ The `DistributedPortGroup` resource can be used to manage
1703
+ distributed port groups connected to vSphere Distributed Switches (VDS).
1704
+ A vSphere Distributed Switch can be managed by the
1705
+ `DistributedVirtualSwitch` resource.
1706
+
1707
+ Distributed port groups can be used as networks for virtual machines, allowing
1708
+ the virtual machines to use the networking supplied by a vSphere Distributed
1709
+ Switch, with a set of policies that apply to that individual network, if
1710
+ desired.
1711
+
1712
+ * For an overview on vSphere networking concepts, refer to the vSphere
1713
+ [product documentation][ref-vsphere-net-concepts].
1714
+
1715
+ * For more information on distributed port groups, refer to the vSphere
1716
+ [product documentation][ref-vsphere-dvportgroup].
1717
+
1718
+ [ref-vsphere-net-concepts]: https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.networking.doc/GUID-2B11DBB8-CB3C-4AFF-8885-EFEA0FC562F4.html
1719
+ [ref-vsphere-dvportgroup]: https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.networking.doc/GUID-69933F6E-2442-46CF-AA17-1196CB9A0A09.html
1720
+
1721
+ > **NOTE:** This resource requires vCenter and is not available on
1722
+ direct ESXi host connections.
1723
+
1724
+ ## Example Usage
1725
+
1726
+ The configuration below builds on the example given in the
1727
+ `DistributedVirtualSwitch` resource by
1728
+ adding the `DistributedPortGroup` resource, attaching itself to the
1729
+ vSphere Distributed Switch and assigning VLAN ID 1000.
1730
+
1731
+ ```python
1732
+ import pulumi
1733
+ import pulumi_vsphere as vsphere
1734
+
1735
+ config = pulumi.Config()
1736
+ hosts = config.get_object("hosts")
1737
+ if hosts is None:
1738
+ hosts = [
1739
+ "esxi-01.example.com",
1740
+ "esxi-02.example.com",
1741
+ "esxi-03.example.com",
1742
+ ]
1743
+ network_interfaces = config.get_object("networkInterfaces")
1744
+ if network_interfaces is None:
1745
+ network_interfaces = [
1746
+ "vmnic0",
1747
+ "vmnic1",
1748
+ "vmnic2",
1749
+ "vmnic3",
1750
+ ]
1751
+ datacenter = vsphere.get_datacenter(name="dc-01")
1752
+ host = [vsphere.get_host(name=hosts[__index],
1753
+ datacenter_id=datacenter.id) for __index in range(len(hosts))]
1754
+ vds = vsphere.DistributedVirtualSwitch("vds",
1755
+ name="vds-01",
1756
+ datacenter_id=datacenter.id,
1757
+ uplinks=[
1758
+ "uplink1",
1759
+ "uplink2",
1760
+ "uplink3",
1761
+ "uplink4",
1762
+ ],
1763
+ active_uplinks=[
1764
+ "uplink1",
1765
+ "uplink2",
1766
+ ],
1767
+ standby_uplinks=[
1768
+ "uplink3",
1769
+ "uplink4",
1770
+ ],
1771
+ hosts=[
1772
+ {
1773
+ "host_system_id": host[0].id,
1774
+ "devices": [network_interfaces],
1775
+ },
1776
+ {
1777
+ "host_system_id": host[1].id,
1778
+ "devices": [network_interfaces],
1779
+ },
1780
+ {
1781
+ "host_system_id": host[2].id,
1782
+ "devices": [network_interfaces],
1783
+ },
1784
+ ])
1785
+ pg = vsphere.DistributedPortGroup("pg",
1786
+ name="pg-01",
1787
+ distributed_virtual_switch_uuid=vds.id,
1788
+ vlan_id=1000)
1789
+ ```
1790
+
1791
+ ### Overriding VDS policies
1792
+
1793
+ All of the default port policies available in the
1794
+ `DistributedVirtualSwitch` resource can be overridden on the port
1795
+ group level by specifying new settings for them.
1796
+
1797
+ As an example, we also take this example from the
1798
+ `DistributedVirtualSwitch` resource where we manually specify our
1799
+ uplink count and uplink order. While the vSphere Distributed Switch has a
1800
+ default policy of using the first uplink as an active uplink and the second
1801
+ one as a standby, the overridden port group policy means that both uplinks
1802
+ will be used as active uplinks in this specific port group.
1803
+
1804
+ ```python
1805
+ import pulumi
1806
+ import pulumi_vsphere as vsphere
1807
+
1808
+ vds = vsphere.DistributedVirtualSwitch("vds",
1809
+ name="vds-01",
1810
+ datacenter_id=datacenter["id"],
1811
+ uplinks=[
1812
+ "uplink1",
1813
+ "uplink2",
1814
+ ],
1815
+ active_uplinks=["uplink1"],
1816
+ standby_uplinks=["uplink2"])
1817
+ pg = vsphere.DistributedPortGroup("pg",
1818
+ name="pg-01",
1819
+ distributed_virtual_switch_uuid=vds.id,
1820
+ vlan_id=1000,
1821
+ active_uplinks=[
1822
+ "uplink1",
1823
+ "uplink2",
1824
+ ],
1825
+ standby_uplinks=[])
1826
+ ```
1827
+
1828
+ ## Import
1829
+
1830
+ An existing port group can be imported into this resource using
1831
+
1832
+ the managed object id of the port group, via the following command:
1833
+
1834
+ ```sh
1835
+ $ pulumi import vsphere:index/distributedPortGroup:DistributedPortGroup pg /dc-01/network/pg-01
1836
+ ```
1837
+
1838
+ The above would import the port group named `pg-01` that is located in the `dc-01`
1839
+
1840
+ datacenter.
1841
+
1703
1842
  :param str resource_name: The name of the resource.
1704
1843
  :param pulumi.ResourceOptions opts: Options for the resource.
1705
1844
  :param pulumi.Input[Sequence[pulumi.Input[str]]] active_uplinks: List of active uplinks used for load balancing, matching the names of the uplinks assigned in the DVS.
@@ -1784,7 +1923,146 @@ class DistributedPortGroup(pulumi.CustomResource):
1784
1923
  args: DistributedPortGroupArgs,
1785
1924
  opts: Optional[pulumi.ResourceOptions] = None):
1786
1925
  """
1787
- Create a DistributedPortGroup resource with the given unique name, props, and options.
1926
+ The `DistributedPortGroup` resource can be used to manage
1927
+ distributed port groups connected to vSphere Distributed Switches (VDS).
1928
+ A vSphere Distributed Switch can be managed by the
1929
+ `DistributedVirtualSwitch` resource.
1930
+
1931
+ Distributed port groups can be used as networks for virtual machines, allowing
1932
+ the virtual machines to use the networking supplied by a vSphere Distributed
1933
+ Switch, with a set of policies that apply to that individual network, if
1934
+ desired.
1935
+
1936
+ * For an overview on vSphere networking concepts, refer to the vSphere
1937
+ [product documentation][ref-vsphere-net-concepts].
1938
+
1939
+ * For more information on distributed port groups, refer to the vSphere
1940
+ [product documentation][ref-vsphere-dvportgroup].
1941
+
1942
+ [ref-vsphere-net-concepts]: https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.networking.doc/GUID-2B11DBB8-CB3C-4AFF-8885-EFEA0FC562F4.html
1943
+ [ref-vsphere-dvportgroup]: https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.networking.doc/GUID-69933F6E-2442-46CF-AA17-1196CB9A0A09.html
1944
+
1945
+ > **NOTE:** This resource requires vCenter and is not available on
1946
+ direct ESXi host connections.
1947
+
1948
+ ## Example Usage
1949
+
1950
+ The configuration below builds on the example given in the
1951
+ `DistributedVirtualSwitch` resource by
1952
+ adding the `DistributedPortGroup` resource, attaching itself to the
1953
+ vSphere Distributed Switch and assigning VLAN ID 1000.
1954
+
1955
+ ```python
1956
+ import pulumi
1957
+ import pulumi_vsphere as vsphere
1958
+
1959
+ config = pulumi.Config()
1960
+ hosts = config.get_object("hosts")
1961
+ if hosts is None:
1962
+ hosts = [
1963
+ "esxi-01.example.com",
1964
+ "esxi-02.example.com",
1965
+ "esxi-03.example.com",
1966
+ ]
1967
+ network_interfaces = config.get_object("networkInterfaces")
1968
+ if network_interfaces is None:
1969
+ network_interfaces = [
1970
+ "vmnic0",
1971
+ "vmnic1",
1972
+ "vmnic2",
1973
+ "vmnic3",
1974
+ ]
1975
+ datacenter = vsphere.get_datacenter(name="dc-01")
1976
+ host = [vsphere.get_host(name=hosts[__index],
1977
+ datacenter_id=datacenter.id) for __index in range(len(hosts))]
1978
+ vds = vsphere.DistributedVirtualSwitch("vds",
1979
+ name="vds-01",
1980
+ datacenter_id=datacenter.id,
1981
+ uplinks=[
1982
+ "uplink1",
1983
+ "uplink2",
1984
+ "uplink3",
1985
+ "uplink4",
1986
+ ],
1987
+ active_uplinks=[
1988
+ "uplink1",
1989
+ "uplink2",
1990
+ ],
1991
+ standby_uplinks=[
1992
+ "uplink3",
1993
+ "uplink4",
1994
+ ],
1995
+ hosts=[
1996
+ {
1997
+ "host_system_id": host[0].id,
1998
+ "devices": [network_interfaces],
1999
+ },
2000
+ {
2001
+ "host_system_id": host[1].id,
2002
+ "devices": [network_interfaces],
2003
+ },
2004
+ {
2005
+ "host_system_id": host[2].id,
2006
+ "devices": [network_interfaces],
2007
+ },
2008
+ ])
2009
+ pg = vsphere.DistributedPortGroup("pg",
2010
+ name="pg-01",
2011
+ distributed_virtual_switch_uuid=vds.id,
2012
+ vlan_id=1000)
2013
+ ```
2014
+
2015
+ ### Overriding VDS policies
2016
+
2017
+ All of the default port policies available in the
2018
+ `DistributedVirtualSwitch` resource can be overridden on the port
2019
+ group level by specifying new settings for them.
2020
+
2021
+ As an example, we also take this example from the
2022
+ `DistributedVirtualSwitch` resource where we manually specify our
2023
+ uplink count and uplink order. While the vSphere Distributed Switch has a
2024
+ default policy of using the first uplink as an active uplink and the second
2025
+ one as a standby, the overridden port group policy means that both uplinks
2026
+ will be used as active uplinks in this specific port group.
2027
+
2028
+ ```python
2029
+ import pulumi
2030
+ import pulumi_vsphere as vsphere
2031
+
2032
+ vds = vsphere.DistributedVirtualSwitch("vds",
2033
+ name="vds-01",
2034
+ datacenter_id=datacenter["id"],
2035
+ uplinks=[
2036
+ "uplink1",
2037
+ "uplink2",
2038
+ ],
2039
+ active_uplinks=["uplink1"],
2040
+ standby_uplinks=["uplink2"])
2041
+ pg = vsphere.DistributedPortGroup("pg",
2042
+ name="pg-01",
2043
+ distributed_virtual_switch_uuid=vds.id,
2044
+ vlan_id=1000,
2045
+ active_uplinks=[
2046
+ "uplink1",
2047
+ "uplink2",
2048
+ ],
2049
+ standby_uplinks=[])
2050
+ ```
2051
+
2052
+ ## Import
2053
+
2054
+ An existing port group can be imported into this resource using
2055
+
2056
+ the managed object id of the port group, via the following command:
2057
+
2058
+ ```sh
2059
+ $ pulumi import vsphere:index/distributedPortGroup:DistributedPortGroup pg /dc-01/network/pg-01
2060
+ ```
2061
+
2062
+ The above would import the port group named `pg-01` that is located in the `dc-01`
2063
+
2064
+ datacenter.
2065
+
1788
2066
  :param str resource_name: The name of the resource.
1789
2067
  :param DistributedPortGroupArgs args: The arguments to use to populate this resource's properties.
1790
2068
  :param pulumi.ResourceOptions opts: Options for the resource.