pulumi-vsphere 4.10.0a1710160860__py3-none-any.whl → 4.13.0a1736836157__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of pulumi-vsphere might be problematic; consult the advisory on the package registry for more details.
- pulumi_vsphere/__init__.py +30 -0
- pulumi_vsphere/_inputs.py +1816 -277
- pulumi_vsphere/_utilities.py +41 -5
- pulumi_vsphere/compute_cluster.py +937 -1488
- pulumi_vsphere/compute_cluster_host_group.py +67 -2
- pulumi_vsphere/compute_cluster_vm_affinity_rule.py +69 -34
- pulumi_vsphere/compute_cluster_vm_anti_affinity_rule.py +41 -2
- pulumi_vsphere/compute_cluster_vm_dependency_rule.py +205 -2
- pulumi_vsphere/compute_cluster_vm_group.py +198 -2
- pulumi_vsphere/compute_cluster_vm_host_rule.py +73 -2
- pulumi_vsphere/config/__init__.pyi +5 -0
- pulumi_vsphere/config/vars.py +5 -0
- pulumi_vsphere/content_library.py +113 -12
- pulumi_vsphere/content_library_item.py +143 -2
- pulumi_vsphere/custom_attribute.py +77 -2
- pulumi_vsphere/datacenter.py +48 -40
- pulumi_vsphere/datastore_cluster.py +217 -366
- pulumi_vsphere/datastore_cluster_vm_anti_affinity_rule.py +159 -2
- pulumi_vsphere/distributed_port_group.py +416 -189
- pulumi_vsphere/distributed_virtual_switch.py +571 -830
- pulumi_vsphere/dpm_host_override.py +63 -2
- pulumi_vsphere/drs_vm_override.py +67 -2
- pulumi_vsphere/entity_permissions.py +64 -38
- pulumi_vsphere/file.py +21 -24
- pulumi_vsphere/folder.py +148 -30
- pulumi_vsphere/get_compute_cluster.py +16 -9
- pulumi_vsphere/get_compute_cluster_host_group.py +36 -25
- pulumi_vsphere/get_content_library.py +23 -15
- pulumi_vsphere/get_content_library_item.py +29 -13
- pulumi_vsphere/get_custom_attribute.py +14 -9
- pulumi_vsphere/get_datacenter.py +30 -12
- pulumi_vsphere/get_datastore.py +44 -12
- pulumi_vsphere/get_datastore_cluster.py +31 -10
- pulumi_vsphere/get_datastore_stats.py +208 -0
- pulumi_vsphere/get_distributed_virtual_switch.py +18 -9
- pulumi_vsphere/get_dynamic.py +35 -25
- pulumi_vsphere/get_folder.py +23 -11
- pulumi_vsphere/get_guest_os_customization.py +26 -52
- pulumi_vsphere/get_host.py +16 -9
- pulumi_vsphere/get_host_base_images.py +104 -0
- pulumi_vsphere/get_host_pci_device.py +28 -19
- pulumi_vsphere/get_host_thumbprint.py +41 -25
- pulumi_vsphere/get_host_vgpu_profile.py +195 -0
- pulumi_vsphere/get_license.py +20 -10
- pulumi_vsphere/get_network.py +80 -24
- pulumi_vsphere/get_ovf_vm_template.py +56 -5
- pulumi_vsphere/get_policy.py +13 -9
- pulumi_vsphere/get_resource_pool.py +29 -23
- pulumi_vsphere/get_role.py +23 -13
- pulumi_vsphere/get_tag.py +16 -9
- pulumi_vsphere/get_tag_category.py +16 -9
- pulumi_vsphere/get_vapp_container.py +15 -9
- pulumi_vsphere/get_virtual_machine.py +233 -48
- pulumi_vsphere/get_vmfs_disks.py +18 -9
- pulumi_vsphere/guest_os_customization.py +60 -5
- pulumi_vsphere/ha_vm_override.py +352 -380
- pulumi_vsphere/host.py +244 -64
- pulumi_vsphere/host_port_group.py +27 -24
- pulumi_vsphere/host_virtual_switch.py +209 -289
- pulumi_vsphere/license.py +5 -32
- pulumi_vsphere/nas_datastore.py +74 -9
- pulumi_vsphere/offline_software_depot.py +185 -0
- pulumi_vsphere/outputs.py +774 -256
- pulumi_vsphere/provider.py +7 -6
- pulumi_vsphere/pulumi-plugin.json +2 -1
- pulumi_vsphere/resource_pool.py +168 -411
- pulumi_vsphere/role.py +33 -2
- pulumi_vsphere/storage_drs_vm_override.py +133 -2
- pulumi_vsphere/supervisor.py +967 -0
- pulumi_vsphere/tag.py +159 -2
- pulumi_vsphere/tag_category.py +83 -2
- pulumi_vsphere/vapp_container.py +163 -2
- pulumi_vsphere/vapp_entity.py +147 -2
- pulumi_vsphere/virtual_disk.py +123 -36
- pulumi_vsphere/virtual_machine.py +759 -829
- pulumi_vsphere/virtual_machine_class.py +447 -0
- pulumi_vsphere/virtual_machine_snapshot.py +13 -12
- pulumi_vsphere/vm_storage_policy.py +120 -127
- pulumi_vsphere/vmfs_datastore.py +271 -2
- pulumi_vsphere/vnic.py +104 -105
- {pulumi_vsphere-4.10.0a1710160860.dist-info → pulumi_vsphere-4.13.0a1736836157.dist-info}/METADATA +7 -6
- pulumi_vsphere-4.13.0a1736836157.dist-info/RECORD +86 -0
- {pulumi_vsphere-4.10.0a1710160860.dist-info → pulumi_vsphere-4.13.0a1736836157.dist-info}/WHEEL +1 -1
- pulumi_vsphere-4.10.0a1710160860.dist-info/RECORD +0 -80
- {pulumi_vsphere-4.10.0a1710160860.dist-info → pulumi_vsphere-4.13.0a1736836157.dist-info}/top_level.txt +0 -0
pulumi_vsphere/vmfs_datastore.py
CHANGED
|
@@ -4,9 +4,14 @@
|
|
|
4
4
|
|
|
5
5
|
import copy
|
|
6
6
|
import warnings
|
|
7
|
+
import sys
|
|
7
8
|
import pulumi
|
|
8
9
|
import pulumi.runtime
|
|
9
10
|
from typing import Any, Mapping, Optional, Sequence, Union, overload
|
|
11
|
+
if sys.version_info >= (3, 11):
|
|
12
|
+
from typing import NotRequired, TypedDict, TypeAlias
|
|
13
|
+
else:
|
|
14
|
+
from typing_extensions import NotRequired, TypedDict, TypeAlias
|
|
10
15
|
from . import _utilities
|
|
11
16
|
|
|
12
17
|
__all__ = ['VmfsDatastoreArgs', 'VmfsDatastore']
|
|
@@ -461,7 +466,139 @@ class VmfsDatastore(pulumi.CustomResource):
|
|
|
461
466
|
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
|
|
462
467
|
__props__=None):
|
|
463
468
|
"""
|
|
464
|
-
|
|
469
|
+
The `VmfsDatastore` resource can be used to create and manage VMFS
|
|
470
|
+
datastores on an ESXi host or a set of hosts. The resource supports using any
|
|
471
|
+
SCSI device that can generally be used in a datastore, such as local disks, or
|
|
472
|
+
disks presented to a host or multiple hosts over Fibre Channel or iSCSI.
|
|
473
|
+
Devices can be specified manually, or discovered using the
|
|
474
|
+
[`get_vmfs_disks`][data-source-vmfs-disks] data source.
|
|
475
|
+
|
|
476
|
+
[data-source-vmfs-disks]: /docs/providers/vsphere/d/vmfs_disks.html
|
|
477
|
+
|
|
478
|
+
## Auto-Mounting of Datastores Within vCenter
|
|
479
|
+
|
|
480
|
+
Note that the current behavior of this resource will auto-mount any created
|
|
481
|
+
datastores to any other host within vCenter that has access to the same disk.
|
|
482
|
+
|
|
483
|
+
Example: You want to create a datastore with an iSCSI LUN that is visible on 3
|
|
484
|
+
hosts in a single vSphere cluster (`esxi1`, `esxi2` and `esxi3`). When you
|
|
485
|
+
create the datastore on `esxi1`, the datastore will be automatically mounted on
|
|
486
|
+
`esxi2` and `esxi3`, without the need to configure the resource on either of
|
|
487
|
+
those two hosts.
|
|
488
|
+
|
|
489
|
+
Future versions of this resource may allow you to control the hosts that a
|
|
490
|
+
datastore is mounted to, but currently, this automatic behavior cannot be
|
|
491
|
+
changed, so keep this in mind when writing your configurations and deploying
|
|
492
|
+
your disks.
|
|
493
|
+
|
|
494
|
+
## Increasing Datastore Size
|
|
495
|
+
|
|
496
|
+
To increase the size of a datastore, you must add additional disks to the
|
|
497
|
+
`disks` attribute. Expanding the size of a datastore by increasing the size of
|
|
498
|
+
an already provisioned disk is currently not supported (but may be in future
|
|
499
|
+
versions of this resource).
|
|
500
|
+
|
|
501
|
+
> **NOTE:** You cannot decrease the size of a datastore. If the resource
|
|
502
|
+
detects disks removed from the configuration, the provider will give an error.
|
|
503
|
+
|
|
504
|
+
[cmd-taint]: /docs/commands/taint.html
|
|
505
|
+
|
|
506
|
+
## Example Usage
|
|
507
|
+
|
|
508
|
+
### Addition of local disks on a single host
|
|
509
|
+
|
|
510
|
+
The following example uses the default datacenter and default host to add a
|
|
511
|
+
datastore with local disks to a single ESXi server.
|
|
512
|
+
|
|
513
|
+
> **NOTE:** There are some situations where datastore creation will not work
|
|
514
|
+
when working through vCenter (usually when trying to create a datastore on a
|
|
515
|
+
single host with local disks). If you experience trouble creating the datastore
|
|
516
|
+
you need through vCenter, break the datastore off into a different configuration
|
|
517
|
+
and deploy it using the ESXi server as the provider endpoint, using a similar
|
|
518
|
+
configuration to what is below.
|
|
519
|
+
|
|
520
|
+
```python
|
|
521
|
+
import pulumi
|
|
522
|
+
import pulumi_vsphere as vsphere
|
|
523
|
+
|
|
524
|
+
datacenter = vsphere.get_datacenter()
|
|
525
|
+
host = vsphere.get_host(datacenter_id=datacenter.id)
|
|
526
|
+
datastore = vsphere.VmfsDatastore("datastore",
|
|
527
|
+
name="test",
|
|
528
|
+
host_system_id=esxi_host["id"],
|
|
529
|
+
disks=[
|
|
530
|
+
"mpx.vmhba1:C0:T1:L0",
|
|
531
|
+
"mpx.vmhba1:C0:T2:L0",
|
|
532
|
+
"mpx.vmhba1:C0:T3:L0",
|
|
533
|
+
])
|
|
534
|
+
```
|
|
535
|
+
|
|
536
|
+
### Auto-detection of disks via `get_vmfs_disks`
|
|
537
|
+
|
|
538
|
+
The following example makes use of the
|
|
539
|
+
`get_vmfs_disks` data source to auto-detect
|
|
540
|
+
exported iSCSI LUNS matching a certain NAA vendor ID (in this case, LUNs
|
|
541
|
+
exported from a [NetApp][ext-netapp]). These discovered disks are then loaded
|
|
542
|
+
into `VmfsDatastore`. The datastore is also placed in the
|
|
543
|
+
`datastore-folder` folder afterwards.
|
|
544
|
+
|
|
545
|
+
[ext-netapp]: https://kb.netapp.com/support/s/article/ka31A0000000rLRQAY/how-to-match-a-lun-s-naa-number-to-its-serial-number?language=en_US
|
|
546
|
+
|
|
547
|
+
```python
|
|
548
|
+
import pulumi
|
|
549
|
+
import pulumi_vsphere as vsphere
|
|
550
|
+
|
|
551
|
+
datacenter = vsphere.get_datacenter(name="dc-01")
|
|
552
|
+
host = vsphere.get_host(name="esxi-01.example.com",
|
|
553
|
+
datacenter_id=datacenter.id)
|
|
554
|
+
available = vsphere.get_vmfs_disks(host_system_id=host.id,
|
|
555
|
+
rescan=True,
|
|
556
|
+
filter="naa.60a98000")
|
|
557
|
+
datastore = vsphere.VmfsDatastore("datastore",
|
|
558
|
+
name="test",
|
|
559
|
+
host_system_id=esxi_host["id"],
|
|
560
|
+
folder="datastore-folder",
|
|
561
|
+
disks=[available.disks])
|
|
562
|
+
```
|
|
563
|
+
|
|
564
|
+
## Import
|
|
565
|
+
|
|
566
|
+
An existing VMFS datastore can be imported into this resource
|
|
567
|
+
|
|
568
|
+
via its managed object ID, via the command below. You also need the host system
|
|
569
|
+
|
|
570
|
+
ID.
|
|
571
|
+
|
|
572
|
+
```sh
|
|
573
|
+
$ pulumi import vsphere:index/vmfsDatastore:VmfsDatastore datastore datastore-123:host-10
|
|
574
|
+
```
|
|
575
|
+
|
|
576
|
+
You need a tool like [`govc`][ext-govc] that can display managed object IDs.
|
|
577
|
+
|
|
578
|
+
[ext-govc]: https://github.com/vmware/govmomi/tree/master/govc
|
|
579
|
+
|
|
580
|
+
In the case of govc, you can locate a managed object ID from an inventory path
|
|
581
|
+
|
|
582
|
+
by doing the following:
|
|
583
|
+
|
|
584
|
+
$ govc ls -i /dc/datastore/terraform-test
|
|
585
|
+
|
|
586
|
+
Datastore:datastore-123
|
|
587
|
+
|
|
588
|
+
To locate host IDs, it might be a good idea to supply the `-l` flag as well so
|
|
589
|
+
|
|
590
|
+
that you can line up the names with the IDs:
|
|
591
|
+
|
|
592
|
+
$ govc ls -l -i /dc/host/cluster1
|
|
593
|
+
|
|
594
|
+
ResourcePool:resgroup-10 /dc/host/cluster1/Resources
|
|
595
|
+
|
|
596
|
+
HostSystem:host-10 /dc/host/cluster1/esxi1
|
|
597
|
+
|
|
598
|
+
HostSystem:host-11 /dc/host/cluster1/esxi2
|
|
599
|
+
|
|
600
|
+
HostSystem:host-12 /dc/host/cluster1/esxi3
|
|
601
|
+
|
|
465
602
|
:param str resource_name: The name of the resource.
|
|
466
603
|
:param pulumi.ResourceOptions opts: Options for the resource.
|
|
467
604
|
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] custom_attributes: Map of custom attribute ids to attribute
|
|
@@ -499,7 +636,139 @@ class VmfsDatastore(pulumi.CustomResource):
|
|
|
499
636
|
args: VmfsDatastoreArgs,
|
|
500
637
|
opts: Optional[pulumi.ResourceOptions] = None):
|
|
501
638
|
"""
|
|
502
|
-
|
|
639
|
+
The `VmfsDatastore` resource can be used to create and manage VMFS
|
|
640
|
+
datastores on an ESXi host or a set of hosts. The resource supports using any
|
|
641
|
+
SCSI device that can generally be used in a datastore, such as local disks, or
|
|
642
|
+
disks presented to a host or multiple hosts over Fibre Channel or iSCSI.
|
|
643
|
+
Devices can be specified manually, or discovered using the
|
|
644
|
+
[`get_vmfs_disks`][data-source-vmfs-disks] data source.
|
|
645
|
+
|
|
646
|
+
[data-source-vmfs-disks]: /docs/providers/vsphere/d/vmfs_disks.html
|
|
647
|
+
|
|
648
|
+
## Auto-Mounting of Datastores Within vCenter
|
|
649
|
+
|
|
650
|
+
Note that the current behavior of this resource will auto-mount any created
|
|
651
|
+
datastores to any other host within vCenter that has access to the same disk.
|
|
652
|
+
|
|
653
|
+
Example: You want to create a datastore with an iSCSI LUN that is visible on 3
|
|
654
|
+
hosts in a single vSphere cluster (`esxi1`, `esxi2` and `esxi3`). When you
|
|
655
|
+
create the datastore on `esxi1`, the datastore will be automatically mounted on
|
|
656
|
+
`esxi2` and `esxi3`, without the need to configure the resource on either of
|
|
657
|
+
those two hosts.
|
|
658
|
+
|
|
659
|
+
Future versions of this resource may allow you to control the hosts that a
|
|
660
|
+
datastore is mounted to, but currently, this automatic behavior cannot be
|
|
661
|
+
changed, so keep this in mind when writing your configurations and deploying
|
|
662
|
+
your disks.
|
|
663
|
+
|
|
664
|
+
## Increasing Datastore Size
|
|
665
|
+
|
|
666
|
+
To increase the size of a datastore, you must add additional disks to the
|
|
667
|
+
`disks` attribute. Expanding the size of a datastore by increasing the size of
|
|
668
|
+
an already provisioned disk is currently not supported (but may be in future
|
|
669
|
+
versions of this resource).
|
|
670
|
+
|
|
671
|
+
> **NOTE:** You cannot decrease the size of a datastore. If the resource
|
|
672
|
+
detects disks removed from the configuration, the provider will give an error.
|
|
673
|
+
|
|
674
|
+
[cmd-taint]: /docs/commands/taint.html
|
|
675
|
+
|
|
676
|
+
## Example Usage
|
|
677
|
+
|
|
678
|
+
### Addition of local disks on a single host
|
|
679
|
+
|
|
680
|
+
The following example uses the default datacenter and default host to add a
|
|
681
|
+
datastore with local disks to a single ESXi server.
|
|
682
|
+
|
|
683
|
+
> **NOTE:** There are some situations where datastore creation will not work
|
|
684
|
+
when working through vCenter (usually when trying to create a datastore on a
|
|
685
|
+
single host with local disks). If you experience trouble creating the datastore
|
|
686
|
+
you need through vCenter, break the datastore off into a different configuration
|
|
687
|
+
and deploy it using the ESXi server as the provider endpoint, using a similar
|
|
688
|
+
configuration to what is below.
|
|
689
|
+
|
|
690
|
+
```python
|
|
691
|
+
import pulumi
|
|
692
|
+
import pulumi_vsphere as vsphere
|
|
693
|
+
|
|
694
|
+
datacenter = vsphere.get_datacenter()
|
|
695
|
+
host = vsphere.get_host(datacenter_id=datacenter.id)
|
|
696
|
+
datastore = vsphere.VmfsDatastore("datastore",
|
|
697
|
+
name="test",
|
|
698
|
+
host_system_id=esxi_host["id"],
|
|
699
|
+
disks=[
|
|
700
|
+
"mpx.vmhba1:C0:T1:L0",
|
|
701
|
+
"mpx.vmhba1:C0:T2:L0",
|
|
702
|
+
"mpx.vmhba1:C0:T3:L0",
|
|
703
|
+
])
|
|
704
|
+
```
|
|
705
|
+
|
|
706
|
+
### Auto-detection of disks via `get_vmfs_disks`
|
|
707
|
+
|
|
708
|
+
The following example makes use of the
|
|
709
|
+
`get_vmfs_disks` data source to auto-detect
|
|
710
|
+
exported iSCSI LUNS matching a certain NAA vendor ID (in this case, LUNs
|
|
711
|
+
exported from a [NetApp][ext-netapp]). These discovered disks are then loaded
|
|
712
|
+
into `VmfsDatastore`. The datastore is also placed in the
|
|
713
|
+
`datastore-folder` folder afterwards.
|
|
714
|
+
|
|
715
|
+
[ext-netapp]: https://kb.netapp.com/support/s/article/ka31A0000000rLRQAY/how-to-match-a-lun-s-naa-number-to-its-serial-number?language=en_US
|
|
716
|
+
|
|
717
|
+
```python
|
|
718
|
+
import pulumi
|
|
719
|
+
import pulumi_vsphere as vsphere
|
|
720
|
+
|
|
721
|
+
datacenter = vsphere.get_datacenter(name="dc-01")
|
|
722
|
+
host = vsphere.get_host(name="esxi-01.example.com",
|
|
723
|
+
datacenter_id=datacenter.id)
|
|
724
|
+
available = vsphere.get_vmfs_disks(host_system_id=host.id,
|
|
725
|
+
rescan=True,
|
|
726
|
+
filter="naa.60a98000")
|
|
727
|
+
datastore = vsphere.VmfsDatastore("datastore",
|
|
728
|
+
name="test",
|
|
729
|
+
host_system_id=esxi_host["id"],
|
|
730
|
+
folder="datastore-folder",
|
|
731
|
+
disks=[available.disks])
|
|
732
|
+
```
|
|
733
|
+
|
|
734
|
+
## Import
|
|
735
|
+
|
|
736
|
+
An existing VMFS datastore can be imported into this resource
|
|
737
|
+
|
|
738
|
+
via its managed object ID, via the command below. You also need the host system
|
|
739
|
+
|
|
740
|
+
ID.
|
|
741
|
+
|
|
742
|
+
```sh
|
|
743
|
+
$ pulumi import vsphere:index/vmfsDatastore:VmfsDatastore datastore datastore-123:host-10
|
|
744
|
+
```
|
|
745
|
+
|
|
746
|
+
You need a tool like [`govc`][ext-govc] that can display managed object IDs.
|
|
747
|
+
|
|
748
|
+
[ext-govc]: https://github.com/vmware/govmomi/tree/master/govc
|
|
749
|
+
|
|
750
|
+
In the case of govc, you can locate a managed object ID from an inventory path
|
|
751
|
+
|
|
752
|
+
by doing the following:
|
|
753
|
+
|
|
754
|
+
$ govc ls -i /dc/datastore/terraform-test
|
|
755
|
+
|
|
756
|
+
Datastore:datastore-123
|
|
757
|
+
|
|
758
|
+
To locate host IDs, it might be a good idea to supply the `-l` flag as well so
|
|
759
|
+
|
|
760
|
+
that you can line up the names with the IDs:
|
|
761
|
+
|
|
762
|
+
$ govc ls -l -i /dc/host/cluster1
|
|
763
|
+
|
|
764
|
+
ResourcePool:resgroup-10 /dc/host/cluster1/Resources
|
|
765
|
+
|
|
766
|
+
HostSystem:host-10 /dc/host/cluster1/esxi1
|
|
767
|
+
|
|
768
|
+
HostSystem:host-11 /dc/host/cluster1/esxi2
|
|
769
|
+
|
|
770
|
+
HostSystem:host-12 /dc/host/cluster1/esxi3
|
|
771
|
+
|
|
503
772
|
:param str resource_name: The name of the resource.
|
|
504
773
|
:param VmfsDatastoreArgs args: The arguments to use to populate this resource's properties.
|
|
505
774
|
:param pulumi.ResourceOptions opts: Options for the resource.
|
pulumi_vsphere/vnic.py
CHANGED
|
@@ -4,9 +4,14 @@
|
|
|
4
4
|
|
|
5
5
|
import copy
|
|
6
6
|
import warnings
|
|
7
|
+
import sys
|
|
7
8
|
import pulumi
|
|
8
9
|
import pulumi.runtime
|
|
9
10
|
from typing import Any, Mapping, Optional, Sequence, Union, overload
|
|
11
|
+
if sys.version_info >= (3, 11):
|
|
12
|
+
from typing import NotRequired, TypedDict, TypeAlias
|
|
13
|
+
else:
|
|
14
|
+
from typing_extensions import NotRequired, TypedDict, TypeAlias
|
|
10
15
|
from . import _utilities
|
|
11
16
|
from . import outputs
|
|
12
17
|
from ._inputs import *
|
|
@@ -30,7 +35,7 @@ class VnicArgs:
|
|
|
30
35
|
The set of arguments for constructing a Vnic resource.
|
|
31
36
|
:param pulumi.Input[str] host: ESX host the interface belongs to
|
|
32
37
|
:param pulumi.Input[str] distributed_port_group: Key of the distributed portgroup the nic will connect to.
|
|
33
|
-
:param pulumi.Input[str] distributed_switch_port: UUID of the
|
|
38
|
+
:param pulumi.Input[str] distributed_switch_port: UUID of the vdswitch the nic will be attached to. Do not set if you set portgroup.
|
|
34
39
|
:param pulumi.Input['VnicIpv4Args'] ipv4: IPv4 settings. Either this or `ipv6` needs to be set. See IPv4 options below.
|
|
35
40
|
:param pulumi.Input['VnicIpv6Args'] ipv6: IPv6 settings. Either this or `ipv6` needs to be set. See IPv6 options below.
|
|
36
41
|
:param pulumi.Input[str] mac: MAC address of the interface.
|
|
@@ -87,7 +92,7 @@ class VnicArgs:
|
|
|
87
92
|
@pulumi.getter(name="distributedSwitchPort")
|
|
88
93
|
def distributed_switch_port(self) -> Optional[pulumi.Input[str]]:
|
|
89
94
|
"""
|
|
90
|
-
UUID of the
|
|
95
|
+
UUID of the vdswitch the nic will be attached to. Do not set if you set portgroup.
|
|
91
96
|
"""
|
|
92
97
|
return pulumi.get(self, "distributed_switch_port")
|
|
93
98
|
|
|
@@ -196,7 +201,7 @@ class _VnicState:
|
|
|
196
201
|
"""
|
|
197
202
|
Input properties used for looking up and filtering Vnic resources.
|
|
198
203
|
:param pulumi.Input[str] distributed_port_group: Key of the distributed portgroup the nic will connect to.
|
|
199
|
-
:param pulumi.Input[str] distributed_switch_port: UUID of the
|
|
204
|
+
:param pulumi.Input[str] distributed_switch_port: UUID of the vdswitch the nic will be attached to. Do not set if you set portgroup.
|
|
200
205
|
:param pulumi.Input[str] host: ESX host the interface belongs to
|
|
201
206
|
:param pulumi.Input['VnicIpv4Args'] ipv4: IPv4 settings. Either this or `ipv6` needs to be set. See IPv4 options below.
|
|
202
207
|
:param pulumi.Input['VnicIpv6Args'] ipv6: IPv6 settings. Either this or `ipv6` needs to be set. See IPv6 options below.
|
|
@@ -243,7 +248,7 @@ class _VnicState:
|
|
|
243
248
|
@pulumi.getter(name="distributedSwitchPort")
|
|
244
249
|
def distributed_switch_port(self) -> Optional[pulumi.Input[str]]:
|
|
245
250
|
"""
|
|
246
|
-
UUID of the
|
|
251
|
+
UUID of the vdswitch the nic will be attached to. Do not set if you set portgroup.
|
|
247
252
|
"""
|
|
248
253
|
return pulumi.get(self, "distributed_switch_port")
|
|
249
254
|
|
|
@@ -356,8 +361,8 @@ class Vnic(pulumi.CustomResource):
|
|
|
356
361
|
distributed_port_group: Optional[pulumi.Input[str]] = None,
|
|
357
362
|
distributed_switch_port: Optional[pulumi.Input[str]] = None,
|
|
358
363
|
host: Optional[pulumi.Input[str]] = None,
|
|
359
|
-
ipv4: Optional[pulumi.Input[
|
|
360
|
-
ipv6: Optional[pulumi.Input[
|
|
364
|
+
ipv4: Optional[pulumi.Input[Union['VnicIpv4Args', 'VnicIpv4ArgsDict']]] = None,
|
|
365
|
+
ipv6: Optional[pulumi.Input[Union['VnicIpv6Args', 'VnicIpv6ArgsDict']]] = None,
|
|
361
366
|
mac: Optional[pulumi.Input[str]] = None,
|
|
362
367
|
mtu: Optional[pulumi.Input[int]] = None,
|
|
363
368
|
netstack: Optional[pulumi.Input[str]] = None,
|
|
@@ -369,94 +374,91 @@ class Vnic(pulumi.CustomResource):
|
|
|
369
374
|
|
|
370
375
|
## Example Usage
|
|
371
376
|
|
|
372
|
-
### S
|
|
373
|
-
|
|
374
377
|
### Create a vnic attached to a distributed virtual switch using the vmotion TCP/IP stack
|
|
375
378
|
|
|
376
|
-
<!--Start PulumiCodeChooser -->
|
|
377
379
|
```python
|
|
378
380
|
import pulumi
|
|
379
381
|
import pulumi_vsphere as vsphere
|
|
380
382
|
|
|
381
|
-
|
|
382
|
-
|
|
383
|
-
datacenter_id=
|
|
384
|
-
|
|
385
|
-
|
|
386
|
-
|
|
387
|
-
|
|
388
|
-
|
|
389
|
-
|
|
390
|
-
|
|
383
|
+
datacenter = vsphere.get_datacenter(name="dc-01")
|
|
384
|
+
host = vsphere.get_host(name="esxi-01.example.com",
|
|
385
|
+
datacenter_id=datacenter.id)
|
|
386
|
+
vds = vsphere.DistributedVirtualSwitch("vds",
|
|
387
|
+
name="vds-01",
|
|
388
|
+
datacenter_id=datacenter.id,
|
|
389
|
+
hosts=[{
|
|
390
|
+
"host_system_id": host.id,
|
|
391
|
+
"devices": ["vnic3"],
|
|
392
|
+
}])
|
|
393
|
+
pg = vsphere.DistributedPortGroup("pg",
|
|
394
|
+
name="pg-01",
|
|
391
395
|
vlan_id=1234,
|
|
392
|
-
distributed_virtual_switch_uuid=
|
|
393
|
-
|
|
394
|
-
host=
|
|
395
|
-
distributed_switch_port=
|
|
396
|
-
distributed_port_group=
|
|
397
|
-
ipv4=
|
|
398
|
-
dhcp
|
|
399
|
-
|
|
396
|
+
distributed_virtual_switch_uuid=vds.id)
|
|
397
|
+
vnic = vsphere.Vnic("vnic",
|
|
398
|
+
host=host.id,
|
|
399
|
+
distributed_switch_port=vds.id,
|
|
400
|
+
distributed_port_group=pg.id,
|
|
401
|
+
ipv4={
|
|
402
|
+
"dhcp": True,
|
|
403
|
+
},
|
|
400
404
|
netstack="vmotion")
|
|
401
405
|
```
|
|
402
|
-
<!--End PulumiCodeChooser -->
|
|
403
406
|
|
|
404
407
|
### Create a vnic attached to a portgroup using the default TCP/IP stack
|
|
405
408
|
|
|
406
|
-
<!--Start PulumiCodeChooser -->
|
|
407
409
|
```python
|
|
408
410
|
import pulumi
|
|
409
411
|
import pulumi_vsphere as vsphere
|
|
410
412
|
|
|
411
|
-
|
|
412
|
-
|
|
413
|
-
datacenter_id=
|
|
414
|
-
|
|
415
|
-
|
|
413
|
+
datacenter = vsphere.get_datacenter(name="dc-01")
|
|
414
|
+
host = vsphere.get_host(name="esxi-01.example.com",
|
|
415
|
+
datacenter_id=datacenter.id)
|
|
416
|
+
hvs = vsphere.HostVirtualSwitch("hvs",
|
|
417
|
+
name="hvs-01",
|
|
418
|
+
host_system_id=host.id,
|
|
416
419
|
network_adapters=[
|
|
417
420
|
"vmnic3",
|
|
418
421
|
"vmnic4",
|
|
419
422
|
],
|
|
420
423
|
active_nics=["vmnic3"],
|
|
421
424
|
standby_nics=["vmnic4"])
|
|
422
|
-
|
|
423
|
-
|
|
424
|
-
|
|
425
|
-
|
|
426
|
-
|
|
427
|
-
|
|
428
|
-
|
|
429
|
-
|
|
430
|
-
|
|
425
|
+
pg = vsphere.HostPortGroup("pg",
|
|
426
|
+
name="pg-01",
|
|
427
|
+
virtual_switch_name=hvs.name,
|
|
428
|
+
host_system_id=host.id)
|
|
429
|
+
vnic = vsphere.Vnic("vnic",
|
|
430
|
+
host=host.id,
|
|
431
|
+
portgroup=pg.name,
|
|
432
|
+
ipv4={
|
|
433
|
+
"dhcp": True,
|
|
434
|
+
},
|
|
431
435
|
services=[
|
|
432
436
|
"vsan",
|
|
433
437
|
"management",
|
|
434
438
|
])
|
|
435
439
|
```
|
|
436
|
-
<!--End PulumiCodeChooser -->
|
|
437
440
|
|
|
438
|
-
##
|
|
441
|
+
## Import
|
|
442
|
+
|
|
443
|
+
An existing vNic can be imported into this resource
|
|
439
444
|
|
|
440
|
-
An existing vNic can be [imported][docs-import] into this resource
|
|
441
445
|
via supplying the vNic's ID. An example is below:
|
|
442
446
|
|
|
443
447
|
[docs-import]: /docs/import/index.html
|
|
444
448
|
|
|
445
|
-
|
|
446
|
-
|
|
447
|
-
import pulumi
|
|
449
|
+
```sh
|
|
450
|
+
$ pulumi import vsphere:index/vnic:Vnic vnic host-123_vmk2
|
|
448
451
|
```
|
|
449
|
-
<!--End PulumiCodeChooser -->
|
|
450
452
|
|
|
451
453
|
The above would import the vnic `vmk2` from host with ID `host-123`.
|
|
452
454
|
|
|
453
455
|
:param str resource_name: The name of the resource.
|
|
454
456
|
:param pulumi.ResourceOptions opts: Options for the resource.
|
|
455
457
|
:param pulumi.Input[str] distributed_port_group: Key of the distributed portgroup the nic will connect to.
|
|
456
|
-
:param pulumi.Input[str] distributed_switch_port: UUID of the
|
|
458
|
+
:param pulumi.Input[str] distributed_switch_port: UUID of the vdswitch the nic will be attached to. Do not set if you set portgroup.
|
|
457
459
|
:param pulumi.Input[str] host: ESX host the interface belongs to
|
|
458
|
-
:param pulumi.Input[
|
|
459
|
-
:param pulumi.Input[
|
|
460
|
+
:param pulumi.Input[Union['VnicIpv4Args', 'VnicIpv4ArgsDict']] ipv4: IPv4 settings. Either this or `ipv6` needs to be set. See IPv4 options below.
|
|
461
|
+
:param pulumi.Input[Union['VnicIpv6Args', 'VnicIpv6ArgsDict']] ipv6: IPv6 settings. Either this or `ipv6` needs to be set. See IPv6 options below.
|
|
460
462
|
:param pulumi.Input[str] mac: MAC address of the interface.
|
|
461
463
|
:param pulumi.Input[int] mtu: MTU of the interface.
|
|
462
464
|
:param pulumi.Input[str] netstack: TCP/IP stack setting for this interface. Possible values are `defaultTcpipStack``, 'vmotion', 'vSphereProvisioning'. Changing this will force the creation of a new interface since it's not possible to change the stack once it gets created. (Default:`defaultTcpipStack`)
|
|
@@ -474,84 +476,81 @@ class Vnic(pulumi.CustomResource):
|
|
|
474
476
|
|
|
475
477
|
## Example Usage
|
|
476
478
|
|
|
477
|
-
### S
|
|
478
|
-
|
|
479
479
|
### Create a vnic attached to a distributed virtual switch using the vmotion TCP/IP stack
|
|
480
480
|
|
|
481
|
-
<!--Start PulumiCodeChooser -->
|
|
482
481
|
```python
|
|
483
482
|
import pulumi
|
|
484
483
|
import pulumi_vsphere as vsphere
|
|
485
484
|
|
|
486
|
-
|
|
487
|
-
|
|
488
|
-
datacenter_id=
|
|
489
|
-
|
|
490
|
-
|
|
491
|
-
|
|
492
|
-
|
|
493
|
-
|
|
494
|
-
|
|
495
|
-
|
|
485
|
+
datacenter = vsphere.get_datacenter(name="dc-01")
|
|
486
|
+
host = vsphere.get_host(name="esxi-01.example.com",
|
|
487
|
+
datacenter_id=datacenter.id)
|
|
488
|
+
vds = vsphere.DistributedVirtualSwitch("vds",
|
|
489
|
+
name="vds-01",
|
|
490
|
+
datacenter_id=datacenter.id,
|
|
491
|
+
hosts=[{
|
|
492
|
+
"host_system_id": host.id,
|
|
493
|
+
"devices": ["vnic3"],
|
|
494
|
+
}])
|
|
495
|
+
pg = vsphere.DistributedPortGroup("pg",
|
|
496
|
+
name="pg-01",
|
|
496
497
|
vlan_id=1234,
|
|
497
|
-
distributed_virtual_switch_uuid=
|
|
498
|
-
|
|
499
|
-
host=
|
|
500
|
-
distributed_switch_port=
|
|
501
|
-
distributed_port_group=
|
|
502
|
-
ipv4=
|
|
503
|
-
dhcp
|
|
504
|
-
|
|
498
|
+
distributed_virtual_switch_uuid=vds.id)
|
|
499
|
+
vnic = vsphere.Vnic("vnic",
|
|
500
|
+
host=host.id,
|
|
501
|
+
distributed_switch_port=vds.id,
|
|
502
|
+
distributed_port_group=pg.id,
|
|
503
|
+
ipv4={
|
|
504
|
+
"dhcp": True,
|
|
505
|
+
},
|
|
505
506
|
netstack="vmotion")
|
|
506
507
|
```
|
|
507
|
-
<!--End PulumiCodeChooser -->
|
|
508
508
|
|
|
509
509
|
### Create a vnic attached to a portgroup using the default TCP/IP stack
|
|
510
510
|
|
|
511
|
-
<!--Start PulumiCodeChooser -->
|
|
512
511
|
```python
|
|
513
512
|
import pulumi
|
|
514
513
|
import pulumi_vsphere as vsphere
|
|
515
514
|
|
|
516
|
-
|
|
517
|
-
|
|
518
|
-
datacenter_id=
|
|
519
|
-
|
|
520
|
-
|
|
515
|
+
datacenter = vsphere.get_datacenter(name="dc-01")
|
|
516
|
+
host = vsphere.get_host(name="esxi-01.example.com",
|
|
517
|
+
datacenter_id=datacenter.id)
|
|
518
|
+
hvs = vsphere.HostVirtualSwitch("hvs",
|
|
519
|
+
name="hvs-01",
|
|
520
|
+
host_system_id=host.id,
|
|
521
521
|
network_adapters=[
|
|
522
522
|
"vmnic3",
|
|
523
523
|
"vmnic4",
|
|
524
524
|
],
|
|
525
525
|
active_nics=["vmnic3"],
|
|
526
526
|
standby_nics=["vmnic4"])
|
|
527
|
-
|
|
528
|
-
|
|
529
|
-
|
|
530
|
-
|
|
531
|
-
|
|
532
|
-
|
|
533
|
-
|
|
534
|
-
|
|
535
|
-
|
|
527
|
+
pg = vsphere.HostPortGroup("pg",
|
|
528
|
+
name="pg-01",
|
|
529
|
+
virtual_switch_name=hvs.name,
|
|
530
|
+
host_system_id=host.id)
|
|
531
|
+
vnic = vsphere.Vnic("vnic",
|
|
532
|
+
host=host.id,
|
|
533
|
+
portgroup=pg.name,
|
|
534
|
+
ipv4={
|
|
535
|
+
"dhcp": True,
|
|
536
|
+
},
|
|
536
537
|
services=[
|
|
537
538
|
"vsan",
|
|
538
539
|
"management",
|
|
539
540
|
])
|
|
540
541
|
```
|
|
541
|
-
<!--End PulumiCodeChooser -->
|
|
542
542
|
|
|
543
|
-
##
|
|
543
|
+
## Import
|
|
544
|
+
|
|
545
|
+
An existing vNic can be imported into this resource
|
|
544
546
|
|
|
545
|
-
An existing vNic can be [imported][docs-import] into this resource
|
|
546
547
|
via supplying the vNic's ID. An example is below:
|
|
547
548
|
|
|
548
549
|
[docs-import]: /docs/import/index.html
|
|
549
550
|
|
|
550
|
-
|
|
551
|
-
|
|
552
|
-
import pulumi
|
|
551
|
+
```sh
|
|
552
|
+
$ pulumi import vsphere:index/vnic:Vnic vnic host-123_vmk2
|
|
553
553
|
```
|
|
554
|
-
<!--End PulumiCodeChooser -->
|
|
555
554
|
|
|
556
555
|
The above would import the vnic `vmk2` from host with ID `host-123`.
|
|
557
556
|
|
|
@@ -573,8 +572,8 @@ class Vnic(pulumi.CustomResource):
|
|
|
573
572
|
distributed_port_group: Optional[pulumi.Input[str]] = None,
|
|
574
573
|
distributed_switch_port: Optional[pulumi.Input[str]] = None,
|
|
575
574
|
host: Optional[pulumi.Input[str]] = None,
|
|
576
|
-
ipv4: Optional[pulumi.Input[
|
|
577
|
-
ipv6: Optional[pulumi.Input[
|
|
575
|
+
ipv4: Optional[pulumi.Input[Union['VnicIpv4Args', 'VnicIpv4ArgsDict']]] = None,
|
|
576
|
+
ipv6: Optional[pulumi.Input[Union['VnicIpv6Args', 'VnicIpv6ArgsDict']]] = None,
|
|
578
577
|
mac: Optional[pulumi.Input[str]] = None,
|
|
579
578
|
mtu: Optional[pulumi.Input[int]] = None,
|
|
580
579
|
netstack: Optional[pulumi.Input[str]] = None,
|
|
@@ -614,8 +613,8 @@ class Vnic(pulumi.CustomResource):
|
|
|
614
613
|
distributed_port_group: Optional[pulumi.Input[str]] = None,
|
|
615
614
|
distributed_switch_port: Optional[pulumi.Input[str]] = None,
|
|
616
615
|
host: Optional[pulumi.Input[str]] = None,
|
|
617
|
-
ipv4: Optional[pulumi.Input[
|
|
618
|
-
ipv6: Optional[pulumi.Input[
|
|
616
|
+
ipv4: Optional[pulumi.Input[Union['VnicIpv4Args', 'VnicIpv4ArgsDict']]] = None,
|
|
617
|
+
ipv6: Optional[pulumi.Input[Union['VnicIpv6Args', 'VnicIpv6ArgsDict']]] = None,
|
|
619
618
|
mac: Optional[pulumi.Input[str]] = None,
|
|
620
619
|
mtu: Optional[pulumi.Input[int]] = None,
|
|
621
620
|
netstack: Optional[pulumi.Input[str]] = None,
|
|
@@ -629,10 +628,10 @@ class Vnic(pulumi.CustomResource):
|
|
|
629
628
|
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
|
|
630
629
|
:param pulumi.ResourceOptions opts: Options for the resource.
|
|
631
630
|
:param pulumi.Input[str] distributed_port_group: Key of the distributed portgroup the nic will connect to.
|
|
632
|
-
:param pulumi.Input[str] distributed_switch_port: UUID of the
|
|
631
|
+
:param pulumi.Input[str] distributed_switch_port: UUID of the vdswitch the nic will be attached to. Do not set if you set portgroup.
|
|
633
632
|
:param pulumi.Input[str] host: ESX host the interface belongs to
|
|
634
|
-
:param pulumi.Input[
|
|
635
|
-
:param pulumi.Input[
|
|
633
|
+
:param pulumi.Input[Union['VnicIpv4Args', 'VnicIpv4ArgsDict']] ipv4: IPv4 settings. Either this or `ipv6` needs to be set. See IPv4 options below.
|
|
634
|
+
:param pulumi.Input[Union['VnicIpv6Args', 'VnicIpv6ArgsDict']] ipv6: IPv6 settings. Either this or `ipv6` needs to be set. See IPv6 options below.
|
|
636
635
|
:param pulumi.Input[str] mac: MAC address of the interface.
|
|
637
636
|
:param pulumi.Input[int] mtu: MTU of the interface.
|
|
638
637
|
:param pulumi.Input[str] netstack: TCP/IP stack setting for this interface. Possible values are `defaultTcpipStack``, 'vmotion', 'vSphereProvisioning'. Changing this will force the creation of a new interface since it's not possible to change the stack once it gets created. (Default:`defaultTcpipStack`)
|
|
@@ -667,7 +666,7 @@ class Vnic(pulumi.CustomResource):
|
|
|
667
666
|
@pulumi.getter(name="distributedSwitchPort")
|
|
668
667
|
def distributed_switch_port(self) -> pulumi.Output[Optional[str]]:
|
|
669
668
|
"""
|
|
670
|
-
UUID of the
|
|
669
|
+
UUID of the vdswitch the nic will be attached to. Do not set if you set portgroup.
|
|
671
670
|
"""
|
|
672
671
|
return pulumi.get(self, "distributed_switch_port")
|
|
673
672
|
|