pulumi-vsphere 4.12.0a1727221820-py3-none-any.whl → 4.12.0a1727848995-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of pulumi-vsphere might be problematic.

Files changed (40)
  1. pulumi_vsphere/compute_cluster.py +176 -2
  2. pulumi_vsphere/compute_cluster_host_group.py +62 -2
  3. pulumi_vsphere/compute_cluster_vm_affinity_rule.py +24 -2
  4. pulumi_vsphere/compute_cluster_vm_anti_affinity_rule.py +36 -2
  5. pulumi_vsphere/compute_cluster_vm_dependency_rule.py +200 -2
  6. pulumi_vsphere/compute_cluster_vm_group.py +158 -2
  7. pulumi_vsphere/compute_cluster_vm_host_rule.py +68 -2
  8. pulumi_vsphere/content_library.py +98 -2
  9. pulumi_vsphere/content_library_item.py +138 -2
  10. pulumi_vsphere/custom_attribute.py +72 -2
  11. pulumi_vsphere/datacenter.py +14 -4
  12. pulumi_vsphere/datastore_cluster.py +58 -2
  13. pulumi_vsphere/datastore_cluster_vm_anti_affinity_rule.py +154 -2
  14. pulumi_vsphere/distributed_port_group.py +280 -2
  15. pulumi_vsphere/distributed_virtual_switch.py +256 -2
  16. pulumi_vsphere/dpm_host_override.py +58 -2
  17. pulumi_vsphere/drs_vm_override.py +62 -2
  18. pulumi_vsphere/folder.py +136 -2
  19. pulumi_vsphere/get_compute_cluster_host_group.py +2 -2
  20. pulumi_vsphere/ha_vm_override.py +158 -2
  21. pulumi_vsphere/host.py +250 -2
  22. pulumi_vsphere/host_port_group.py +12 -2
  23. pulumi_vsphere/host_virtual_switch.py +64 -2
  24. pulumi_vsphere/nas_datastore.py +62 -2
  25. pulumi_vsphere/pulumi-plugin.json +1 -1
  26. pulumi_vsphere/resource_pool.py +4 -16
  27. pulumi_vsphere/role.py +28 -2
  28. pulumi_vsphere/storage_drs_vm_override.py +128 -2
  29. pulumi_vsphere/tag.py +154 -2
  30. pulumi_vsphere/tag_category.py +78 -2
  31. pulumi_vsphere/vapp_container.py +158 -2
  32. pulumi_vsphere/vapp_entity.py +142 -2
  33. pulumi_vsphere/virtual_disk.py +76 -2
  34. pulumi_vsphere/virtual_machine.py +48 -2
  35. pulumi_vsphere/vmfs_datastore.py +266 -2
  36. pulumi_vsphere/vnic.py +14 -4
  37. {pulumi_vsphere-4.12.0a1727221820.dist-info → pulumi_vsphere-4.12.0a1727848995.dist-info}/METADATA +1 -1
  38. {pulumi_vsphere-4.12.0a1727221820.dist-info → pulumi_vsphere-4.12.0a1727848995.dist-info}/RECORD +40 -40
  39. {pulumi_vsphere-4.12.0a1727221820.dist-info → pulumi_vsphere-4.12.0a1727848995.dist-info}/WHEEL +0 -0
  40. {pulumi_vsphere-4.12.0a1727221820.dist-info → pulumi_vsphere-4.12.0a1727848995.dist-info}/top_level.txt +0 -0
pulumi_vsphere/vapp_entity.py CHANGED
@@ -406,7 +406,77 @@ class VappEntity(pulumi.CustomResource):
  wait_for_guest: Optional[pulumi.Input[bool]] = None,
  __props__=None):
  """
- Create a VappEntity resource with the given unique name, props, and options.
+ The `VappEntity` resource can be used to describe the behavior of an
+ entity (virtual machine or sub-vApp container) in a vApp container.
+
+ For more information on vSphere vApps, see [this
+ page][ref-vsphere-vapp].
+
+ [ref-vsphere-vapp]: https://docs.vmware.com/en/VMware-vSphere/8.0/vsphere-vm-administration/GUID-2A95EBB8-1779-40FA-B4FB-4D0845750879.html
+
+ ## Example Usage
+
+ The basic example below sets up a vApp container and a virtual machine in a
+ compute cluster and then creates a vApp entity to change the virtual machine's
+ power-on behavior in the vApp container.
+
+ ```python
+ import pulumi
+ import pulumi_vsphere as vsphere
+
+ config = pulumi.Config()
+ datacenter = config.get("datacenter")
+ if datacenter is None:
+     datacenter = "dc-01"
+ cluster = config.get("cluster")
+ if cluster is None:
+     cluster = "cluster-01"
+ datacenter_get_datacenter = vsphere.get_datacenter(name=datacenter)
+ compute_cluster = vsphere.get_compute_cluster(name=cluster,
+     datacenter_id=datacenter_get_datacenter.id)
+ network = vsphere.get_network(name="network1",
+     datacenter_id=datacenter_get_datacenter.id)
+ datastore = vsphere.get_datastore(name="datastore1",
+     datacenter_id=datacenter_get_datacenter.id)
+ vapp_container = vsphere.VappContainer("vapp_container",
+     name="vapp-container-test",
+     parent_resource_pool_id=compute_cluster.id)
+ vm = vsphere.VirtualMachine("vm",
+     name="virtual-machine-test",
+     resource_pool_id=vapp_container.id,
+     datastore_id=datastore.id,
+     num_cpus=2,
+     memory=1024,
+     guest_id="ubuntu64Guest",
+     disks=[{
+         "label": "disk0",
+         "size": 1,
+     }],
+     network_interfaces=[{
+         "network_id": network.id,
+     }])
+ vapp_entity = vsphere.VappEntity("vapp_entity",
+     target_id=vm.moid,
+     container_id=vapp_container.id,
+     start_action="none")
+ ```
+
+ ## Import
+
+ An existing vApp entity can be imported into this resource via the ID of
+ the vApp entity.
+
+ ```sh
+ $ pulumi import vsphere:index/vappEntity:VappEntity vapp_entity vm-123:res-456
+ ```
+
+ The above would import the vApp entity that governs the behavior of the
+ virtual machine with a [managed object ID][docs-about-morefs] of `vm-123` in
+ the vApp container with the [managed object ID][docs-about-morefs] `res-456`.
+
  :param str resource_name: The name of the resource.
  :param pulumi.ResourceOptions opts: Options for the resource.
  :param pulumi.Input[str] container_id: Managed object ID of the vApp
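For code-first workflows, the same import can also be expressed without the CLI through Pulumi's `import_` resource option. The sketch below is illustrative only: it reuses the hypothetical `vm-123:res-456` ID from the hunk above, and the declared properties must match the live entity for the adoption to succeed.

```python
import pulumi
import pulumi_vsphere as vsphere

# Minimal sketch: adopt an existing vApp entity instead of creating one.
# "vm-123" (VM managed object ID) and "res-456" (vApp container managed
# object ID) are hypothetical values carried over from the example above.
vapp_entity = vsphere.VappEntity(
    "vapp_entity",
    target_id="vm-123",
    container_id="res-456",
    start_action="none",
    # Same "<vm-moid>:<container-moid>" format as the `pulumi import`
    # command; remove this option after the first successful `pulumi up`.
    opts=pulumi.ResourceOptions(import_="vm-123:res-456"),
)
```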
@@ -438,7 +508,77 @@ class VappEntity(pulumi.CustomResource):
  args: VappEntityArgs,
  opts: Optional[pulumi.ResourceOptions] = None):
  """
- Create a VappEntity resource with the given unique name, props, and options.
+ The `VappEntity` resource can be used to describe the behavior of an
+ entity (virtual machine or sub-vApp container) in a vApp container.
+
+ For more information on vSphere vApps, see [this
+ page][ref-vsphere-vapp].
+
+ [ref-vsphere-vapp]: https://docs.vmware.com/en/VMware-vSphere/8.0/vsphere-vm-administration/GUID-2A95EBB8-1779-40FA-B4FB-4D0845750879.html
+
+ ## Example Usage
+
+ The basic example below sets up a vApp container and a virtual machine in a
+ compute cluster and then creates a vApp entity to change the virtual machine's
+ power-on behavior in the vApp container.
+
+ ```python
+ import pulumi
+ import pulumi_vsphere as vsphere
+
+ config = pulumi.Config()
+ datacenter = config.get("datacenter")
+ if datacenter is None:
+     datacenter = "dc-01"
+ cluster = config.get("cluster")
+ if cluster is None:
+     cluster = "cluster-01"
+ datacenter_get_datacenter = vsphere.get_datacenter(name=datacenter)
+ compute_cluster = vsphere.get_compute_cluster(name=cluster,
+     datacenter_id=datacenter_get_datacenter.id)
+ network = vsphere.get_network(name="network1",
+     datacenter_id=datacenter_get_datacenter.id)
+ datastore = vsphere.get_datastore(name="datastore1",
+     datacenter_id=datacenter_get_datacenter.id)
+ vapp_container = vsphere.VappContainer("vapp_container",
+     name="vapp-container-test",
+     parent_resource_pool_id=compute_cluster.id)
+ vm = vsphere.VirtualMachine("vm",
+     name="virtual-machine-test",
+     resource_pool_id=vapp_container.id,
+     datastore_id=datastore.id,
+     num_cpus=2,
+     memory=1024,
+     guest_id="ubuntu64Guest",
+     disks=[{
+         "label": "disk0",
+         "size": 1,
+     }],
+     network_interfaces=[{
+         "network_id": network.id,
+     }])
+ vapp_entity = vsphere.VappEntity("vapp_entity",
+     target_id=vm.moid,
+     container_id=vapp_container.id,
+     start_action="none")
+ ```
+
+ ## Import
+
+ An existing vApp entity can be imported into this resource via the ID of
+ the vApp entity.
+
+ ```sh
+ $ pulumi import vsphere:index/vappEntity:VappEntity vapp_entity vm-123:res-456
+ ```
+
+ The above would import the vApp entity that governs the behavior of the
+ virtual machine with a [managed object ID][docs-about-morefs] of `vm-123` in
+ the vApp container with the [managed object ID][docs-about-morefs] `res-456`.
+
  :param str resource_name: The name of the resource.
  :param VappEntityArgs args: The arguments to use to populate this resource's properties.
  :param pulumi.ResourceOptions opts: Options for the resource.
pulumi_vsphere/virtual_disk.py CHANGED
@@ -370,7 +370,44 @@ class VirtualDisk(pulumi.CustomResource):
  vmdk_path: Optional[pulumi.Input[str]] = None,
  __props__=None):
  """
- Create a VirtualDisk resource with the given unique name, props, and options.
+ The `VirtualDisk` resource can be used to create virtual disks outside of
+ any given `VirtualMachine` resource. These disks can be attached to a
+ virtual machine by creating a disk block with the `attach` parameter.
+
+ ## Example Usage
+
+ ```python
+ import pulumi
+ import pulumi_vsphere as vsphere
+
+ datacenter = vsphere.get_datacenter(name="dc-01")
+ datastore = vsphere.get_datastore(name="datastore-01",
+     datacenter_id=datacenter.id)
+ virtual_disk = vsphere.VirtualDisk("virtual_disk",
+     size=40,
+     type="thin",
+     vmdk_path="/foo/foo.vmdk",
+     create_directories=True,
+     datacenter=datacenter.name,
+     datastore=datastore.name)
+ ```
+
+ ## Import
+
+ An existing virtual disk can be imported into this resource by supplying
+ the full datastore path to the virtual disk. An example is below:
+
+ ```sh
+ $ pulumi import vsphere:index/virtualDisk:VirtualDisk virtual_disk \\
+     '{"virtual_disk_path": "/dc-01/[datastore-01]foo/bar.vmdk", "create_directories": "true"}'
+ ```
+
+ The above would import the virtual disk located at `foo/bar.vmdk` in the
+ `datastore-01` datastore of the `dc-01` datacenter with `create_directories`
+ set to `true`.
+
  :param str resource_name: The name of the resource.
  :param pulumi.ResourceOptions opts: Options for the resource.
  :param pulumi.Input[str] adapter_type: The adapter type for this virtual disk. Can be
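The docstring above says a `VirtualDisk` can be attached to a virtual machine through a disk block with the `attach` parameter, but stops short of showing one. Below is a minimal sketch under stated assumptions: the resource pool and datastore MOIDs are hypothetical placeholders, and the path reuses the VMDK from the example.

```python
import pulumi
import pulumi_vsphere as vsphere

# Minimal sketch: attach the externally managed VMDK from the example above
# to a virtual machine rather than letting the VM create its own disk.
vm = vsphere.VirtualMachine(
    "vm",
    name="virtual-machine-test",
    resource_pool_id="resgroup-10",   # hypothetical resource pool MOID
    datastore_id="datastore-123",     # hypothetical datastore MOID
    num_cpus=2,
    memory=1024,
    guest_id="ubuntu64Guest",
    disks=[{
        "label": "disk0",
        "attach": True,                   # attach an existing disk, do not create
        "path": "foo/foo.vmdk",           # datastore-relative path of the VMDK above
        "datastore_id": "datastore-123",  # datastore holding the VMDK
    }],
)
```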
@@ -413,7 +450,44 @@ class VirtualDisk(pulumi.CustomResource):
  args: VirtualDiskArgs,
  opts: Optional[pulumi.ResourceOptions] = None):
  """
- Create a VirtualDisk resource with the given unique name, props, and options.
+ The `VirtualDisk` resource can be used to create virtual disks outside of
+ any given `VirtualMachine` resource. These disks can be attached to a
+ virtual machine by creating a disk block with the `attach` parameter.
+
+ ## Example Usage
+
+ ```python
+ import pulumi
+ import pulumi_vsphere as vsphere
+
+ datacenter = vsphere.get_datacenter(name="dc-01")
+ datastore = vsphere.get_datastore(name="datastore-01",
+     datacenter_id=datacenter.id)
+ virtual_disk = vsphere.VirtualDisk("virtual_disk",
+     size=40,
+     type="thin",
+     vmdk_path="/foo/foo.vmdk",
+     create_directories=True,
+     datacenter=datacenter.name,
+     datastore=datastore.name)
+ ```
+
+ ## Import
+
+ An existing virtual disk can be imported into this resource by supplying
+ the full datastore path to the virtual disk. An example is below:
+
+ ```sh
+ $ pulumi import vsphere:index/virtualDisk:VirtualDisk virtual_disk \\
+     '{"virtual_disk_path": "/dc-01/[datastore-01]foo/bar.vmdk", "create_directories": "true"}'
+ ```
+
+ The above would import the virtual disk located at `foo/bar.vmdk` in the
+ `datastore-01` datastore of the `dc-01` datacenter with `create_directories`
+ set to `true`.
+
  :param str resource_name: The name of the resource.
  :param VirtualDiskArgs args: The arguments to use to populate this resource's properties.
  :param pulumi.ResourceOptions opts: Options for the resource.
pulumi_vsphere/virtual_machine.py CHANGED
@@ -2718,7 +2718,30 @@ class VirtualMachine(pulumi.CustomResource):
  wait_for_guest_net_timeout: Optional[pulumi.Input[int]] = None,
  __props__=None):
  """
- Create a VirtualMachine resource with the given unique name, props, and options.
+ ## Import
+
+ ### Additional Importing Requirements
+
+ Many of the requirements for [cloning](#additional-requirements-and-notes-for-cloning) apply to importing. Although importing writes directly to the state, some rules cannot be enforced at import time, so every effort should be made to ensure the correctness of the configuration before the import.
+
+ The following requirements apply to import (see the sketch after this section for a layout that satisfies the first rule):
+
+ * The disks must have a [`label`](#label) argument assigned in a convention matching `diskN`, starting with disk number 0, based on each virtual disk's order on the SCSI bus. As an example, a disk on SCSI controller `0` with a unit number of `0` would be labeled `disk0`, a disk on the same controller with a unit number of `1` would be `disk1`, but the next disk, which is on SCSI controller `1` with a unit number of `0`, still becomes `disk2`.
+
+ * Disks are always imported with [`keep_on_remove`](#keep_on_remove) enabled until the first `pulumi up` run, which removes the setting for known disks. This process safeguards against naming or accounting mistakes in the disk configuration.
+
+ * The storage controller count for the resource is set to the number of contiguous storage controllers found, starting with the controller at SCSI bus number `0`. If no storage controllers are discovered, the virtual machine is not eligible for import. For maximum compatibility, ensure that the virtual machine has the exact number of storage controllers needed and set the storage controller count accordingly.
+
+ After importing, you should run `pulumi preview`. Unless you have changed anything else in the configuration that would cause other attributes to change, the only differences should be configuration-only changes, which typically comprise:
+
+ * The [`imported`](#imported) flag will transition from `true` to `false`.
+
+ * The [`keep_on_remove`](#keep_on_remove) of known disks will transition from `true` to `false`.
+
+ * Configuration supplied in the [`clone`](#clone) block, if present, will be persisted to state. This initial persistence operation does not perform any cloning or customization actions, nor does it force a new resource. After the first apply operation, further changes to `clone` will force the creation of a new resource.
+
+ These changes only update the state when applied, so it is safe to run this while the virtual machine is running. If more settings are modified, you may need to plan maintenance accordingly for any necessary virtual machine re-configurations.
+
  :param str resource_name: The name of the resource.
  :param pulumi.ResourceOptions opts: Options for the resource.
  :param pulumi.Input[str] alternate_guest_name: The guest name for the operating system when guest_id is otherGuest or otherGuest64.
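The `diskN` labeling rule in the first bullet above is easier to see in configuration form. The following sketch is illustrative, not taken from the package: the MOIDs are hypothetical, and it assumes the provider's convention that disk unit numbers 0-14 land on SCSI controller 0 and 15-29 on controller 1.

```python
import pulumi
import pulumi_vsphere as vsphere

# Minimal sketch of a disk layout that satisfies the import labeling rule:
# labels follow SCSI bus order across controllers, so the first disk on
# controller 1 continues the sequence as disk2 rather than restarting at 0.
vm = vsphere.VirtualMachine(
    "imported_vm",
    name="imported-vm",
    resource_pool_id="resgroup-10",  # hypothetical resource pool MOID
    datastore_id="datastore-123",    # hypothetical datastore MOID
    num_cpus=2,
    memory=4096,
    guest_id="ubuntu64Guest",
    scsi_controller_count=2,  # must match the contiguous controllers found on import
    disks=[
        {"label": "disk0", "size": 40, "unit_number": 0},   # controller 0, unit 0
        {"label": "disk1", "size": 40, "unit_number": 1},   # controller 0, unit 1
        {"label": "disk2", "size": 40, "unit_number": 15},  # controller 1, unit 0
    ],
)
```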
@@ -2816,7 +2839,30 @@ class VirtualMachine(pulumi.CustomResource):
  args: VirtualMachineArgs,
  opts: Optional[pulumi.ResourceOptions] = None):
  """
- Create a VirtualMachine resource with the given unique name, props, and options.
+ ## Import
+
+ ### Additional Importing Requirements
+
+ Many of the requirements for [cloning](#additional-requirements-and-notes-for-cloning) apply to importing. Although importing writes directly to the state, some rules cannot be enforced at import time, so every effort should be made to ensure the correctness of the configuration before the import.
+
+ The following requirements apply to import:
+
+ * The disks must have a [`label`](#label) argument assigned in a convention matching `diskN`, starting with disk number 0, based on each virtual disk's order on the SCSI bus. As an example, a disk on SCSI controller `0` with a unit number of `0` would be labeled `disk0`, a disk on the same controller with a unit number of `1` would be `disk1`, but the next disk, which is on SCSI controller `1` with a unit number of `0`, still becomes `disk2`.
+
+ * Disks are always imported with [`keep_on_remove`](#keep_on_remove) enabled until the first `pulumi up` run, which removes the setting for known disks. This process safeguards against naming or accounting mistakes in the disk configuration.
+
+ * The storage controller count for the resource is set to the number of contiguous storage controllers found, starting with the controller at SCSI bus number `0`. If no storage controllers are discovered, the virtual machine is not eligible for import. For maximum compatibility, ensure that the virtual machine has the exact number of storage controllers needed and set the storage controller count accordingly.
+
+ After importing, you should run `pulumi preview`. Unless you have changed anything else in the configuration that would cause other attributes to change, the only differences should be configuration-only changes, which typically comprise:
+
+ * The [`imported`](#imported) flag will transition from `true` to `false`.
+
+ * The [`keep_on_remove`](#keep_on_remove) of known disks will transition from `true` to `false`.
+
+ * Configuration supplied in the [`clone`](#clone) block, if present, will be persisted to state. This initial persistence operation does not perform any cloning or customization actions, nor does it force a new resource. After the first apply operation, further changes to `clone` will force the creation of a new resource.
+
+ These changes only update the state when applied, so it is safe to run this while the virtual machine is running. If more settings are modified, you may need to plan maintenance accordingly for any necessary virtual machine re-configurations.
+
  :param str resource_name: The name of the resource.
  :param VirtualMachineArgs args: The arguments to use to populate this resource's properties.
  :param pulumi.ResourceOptions opts: Options for the resource.
pulumi_vsphere/vmfs_datastore.py CHANGED
@@ -461,7 +461,139 @@ class VmfsDatastore(pulumi.CustomResource):
  tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
  __props__=None):
  """
- Create a VmfsDatastore resource with the given unique name, props, and options.
+ The `VmfsDatastore` resource can be used to create and manage VMFS
+ datastores on an ESXi host or a set of hosts. The resource supports using any
+ SCSI device that can generally be used in a datastore, such as local disks, or
+ disks presented to a host or multiple hosts over Fibre Channel or iSCSI.
+ Devices can be specified manually, or discovered using the
+ [`get_vmfs_disks`][data-source-vmfs-disks] data source.
+
+ [data-source-vmfs-disks]: /docs/providers/vsphere/d/vmfs_disks.html
+
+ ## Auto-Mounting of Datastores Within vCenter
+
+ Note that the current behavior of this resource will auto-mount any created
+ datastores to any other host within vCenter that has access to the same disk.
+
+ Example: You want to create a datastore with an iSCSI LUN that is visible on
+ three hosts in a single vSphere cluster (`esxi1`, `esxi2`, and `esxi3`). When
+ you create the datastore on `esxi1`, the datastore will be automatically
+ mounted on `esxi2` and `esxi3`, without the need to configure the resource on
+ either of those two hosts.
+
+ Future versions of this resource may allow you to control the hosts that a
+ datastore is mounted to, but currently, this automatic behavior cannot be
+ changed, so keep this in mind when writing your configurations and deploying
+ your disks.
+
+ ## Increasing Datastore Size
+
+ To increase the size of a datastore, you must add additional disks to the
+ `disks` attribute. Expanding the size of a datastore by increasing the size of
+ an already provisioned disk is currently not supported (but may be in future
+ versions of this resource).
+
+ > **NOTE:** You cannot decrease the size of a datastore. If the resource
+ detects disks removed from the configuration, the provider will give an error.
+
+ ## Example Usage
+
+ ### Addition of local disks on a single host
+
+ The following example uses the default datacenter and default host to add a
+ datastore with local disks to a single ESXi server.
+
+ > **NOTE:** There are some situations where datastore creation will not work
+ when working through vCenter (usually when trying to create a datastore on a
+ single host with local disks). If you experience trouble creating the datastore
+ you need through vCenter, break the datastore off into a different configuration
+ and deploy it using the ESXi server as the provider endpoint, using a similar
+ configuration to what is below.
+
+ ```python
+ import pulumi
+ import pulumi_vsphere as vsphere
+
+ datacenter = vsphere.get_datacenter()
+ host = vsphere.get_host(datacenter_id=datacenter.id)
+ datastore = vsphere.VmfsDatastore("datastore",
+     name="test",
+     host_system_id=host.id,
+     disks=[
+         "mpx.vmhba1:C0:T1:L0",
+         "mpx.vmhba1:C0:T2:L0",
+         "mpx.vmhba1:C0:T3:L0",
+     ])
+ ```
+
+ ### Auto-detection of disks via `get_vmfs_disks`
+
+ The following example makes use of the
+ `get_vmfs_disks` data source to auto-detect
+ exported iSCSI LUNs matching a certain NAA vendor ID (in this case, LUNs
+ exported from a [NetApp][ext-netapp]). These discovered disks are then loaded
+ into `VmfsDatastore`. The datastore is also placed in the
+ `datastore-folder` folder afterwards.
+
+ [ext-netapp]: https://kb.netapp.com/support/s/article/ka31A0000000rLRQAY/how-to-match-a-lun-s-naa-number-to-its-serial-number?language=en_US
+
+ ```python
+ import pulumi
+ import pulumi_vsphere as vsphere
+
+ datacenter = vsphere.get_datacenter(name="dc-01")
+ host = vsphere.get_host(name="esxi-01.example.com",
+     datacenter_id=datacenter.id)
+ available = vsphere.get_vmfs_disks(host_system_id=host.id,
+     rescan=True,
+     filter="naa.60a98000")
+ datastore = vsphere.VmfsDatastore("datastore",
+     name="test",
+     host_system_id=host.id,
+     folder="datastore-folder",
+     disks=available.disks)
+ ```
+
+ ## Import
+
+ An existing VMFS datastore can be imported into this resource via its managed
+ object ID, using the command below. You also need the host system ID.
+
+ ```sh
+ $ pulumi import vsphere:index/vmfsDatastore:VmfsDatastore datastore datastore-123:host-10
+ ```
+
+ You need a tool like [`govc`][ext-govc] that can display managed object IDs.
+
+ [ext-govc]: https://github.com/vmware/govmomi/tree/master/govc
+
+ In the case of govc, you can locate a managed object ID from an inventory path
+ by doing the following:
+
+     $ govc ls -i /dc/datastore/terraform-test
+     Datastore:datastore-123
+
+ To locate host IDs, it might be a good idea to supply the `-l` flag as well so
+ that you can line up the names with the IDs:
+
+     $ govc ls -l -i /dc/host/cluster1
+     ResourcePool:resgroup-10 /dc/host/cluster1/Resources
+     HostSystem:host-10 /dc/host/cluster1/esxi1
+     HostSystem:host-11 /dc/host/cluster1/esxi2
+     HostSystem:host-12 /dc/host/cluster1/esxi3
+
  :param str resource_name: The name of the resource.
  :param pulumi.ResourceOptions opts: Options for the resource.
  :param pulumi.Input[Mapping[str, pulumi.Input[str]]] custom_attributes: Map of custom attribute ids to attribute
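As a companion to the "Increasing Datastore Size" section above, here is a minimal sketch of growing an existing datastore by appending a device to `disks`. The device names and host MOID are hypothetical.

```python
import pulumi
import pulumi_vsphere as vsphere

# Minimal sketch: capacity is only ever extended by appending devices.
# Never remove an entry from `disks`; the provider errors on removed disks.
datastore = vsphere.VmfsDatastore(
    "datastore",
    name="test",
    host_system_id="host-10",  # hypothetical host MOID
    disks=[
        "mpx.vmhba1:C0:T1:L0",  # original device
        "mpx.vmhba1:C0:T2:L0",  # original device
        "naa.60a980003240000000000000000000a1",  # new device extends the datastore
    ],
)
```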
@@ -499,7 +631,139 @@ class VmfsDatastore(pulumi.CustomResource):
  args: VmfsDatastoreArgs,
  opts: Optional[pulumi.ResourceOptions] = None):
  """
- Create a VmfsDatastore resource with the given unique name, props, and options.
+ The `VmfsDatastore` resource can be used to create and manage VMFS
+ datastores on an ESXi host or a set of hosts. The resource supports using any
+ SCSI device that can generally be used in a datastore, such as local disks, or
+ disks presented to a host or multiple hosts over Fibre Channel or iSCSI.
+ Devices can be specified manually, or discovered using the
+ [`get_vmfs_disks`][data-source-vmfs-disks] data source.
+
+ [data-source-vmfs-disks]: /docs/providers/vsphere/d/vmfs_disks.html
+
+ ## Auto-Mounting of Datastores Within vCenter
+
+ Note that the current behavior of this resource will auto-mount any created
+ datastores to any other host within vCenter that has access to the same disk.
+
+ Example: You want to create a datastore with an iSCSI LUN that is visible on
+ three hosts in a single vSphere cluster (`esxi1`, `esxi2`, and `esxi3`). When
+ you create the datastore on `esxi1`, the datastore will be automatically
+ mounted on `esxi2` and `esxi3`, without the need to configure the resource on
+ either of those two hosts.
+
+ Future versions of this resource may allow you to control the hosts that a
+ datastore is mounted to, but currently, this automatic behavior cannot be
+ changed, so keep this in mind when writing your configurations and deploying
+ your disks.
+
+ ## Increasing Datastore Size
+
+ To increase the size of a datastore, you must add additional disks to the
+ `disks` attribute. Expanding the size of a datastore by increasing the size of
+ an already provisioned disk is currently not supported (but may be in future
+ versions of this resource).
+
+ > **NOTE:** You cannot decrease the size of a datastore. If the resource
+ detects disks removed from the configuration, the provider will give an error.
+
+ ## Example Usage
+
+ ### Addition of local disks on a single host
+
+ The following example uses the default datacenter and default host to add a
+ datastore with local disks to a single ESXi server.
+
+ > **NOTE:** There are some situations where datastore creation will not work
+ when working through vCenter (usually when trying to create a datastore on a
+ single host with local disks). If you experience trouble creating the datastore
+ you need through vCenter, break the datastore off into a different configuration
+ and deploy it using the ESXi server as the provider endpoint, using a similar
+ configuration to what is below.
+
+ ```python
+ import pulumi
+ import pulumi_vsphere as vsphere
+
+ datacenter = vsphere.get_datacenter()
+ host = vsphere.get_host(datacenter_id=datacenter.id)
+ datastore = vsphere.VmfsDatastore("datastore",
+     name="test",
+     host_system_id=host.id,
+     disks=[
+         "mpx.vmhba1:C0:T1:L0",
+         "mpx.vmhba1:C0:T2:L0",
+         "mpx.vmhba1:C0:T3:L0",
+     ])
+ ```
+
+ ### Auto-detection of disks via `get_vmfs_disks`
+
+ The following example makes use of the
+ `get_vmfs_disks` data source to auto-detect
+ exported iSCSI LUNs matching a certain NAA vendor ID (in this case, LUNs
+ exported from a [NetApp][ext-netapp]). These discovered disks are then loaded
+ into `VmfsDatastore`. The datastore is also placed in the
+ `datastore-folder` folder afterwards.
+
+ [ext-netapp]: https://kb.netapp.com/support/s/article/ka31A0000000rLRQAY/how-to-match-a-lun-s-naa-number-to-its-serial-number?language=en_US
+
+ ```python
+ import pulumi
+ import pulumi_vsphere as vsphere
+
+ datacenter = vsphere.get_datacenter(name="dc-01")
+ host = vsphere.get_host(name="esxi-01.example.com",
+     datacenter_id=datacenter.id)
+ available = vsphere.get_vmfs_disks(host_system_id=host.id,
+     rescan=True,
+     filter="naa.60a98000")
+ datastore = vsphere.VmfsDatastore("datastore",
+     name="test",
+     host_system_id=host.id,
+     folder="datastore-folder",
+     disks=available.disks)
+ ```
+
+ ## Import
+
+ An existing VMFS datastore can be imported into this resource via its managed
+ object ID, using the command below. You also need the host system ID.
+
+ ```sh
+ $ pulumi import vsphere:index/vmfsDatastore:VmfsDatastore datastore datastore-123:host-10
+ ```
+
+ You need a tool like [`govc`][ext-govc] that can display managed object IDs.
+
+ [ext-govc]: https://github.com/vmware/govmomi/tree/master/govc
+
+ In the case of govc, you can locate a managed object ID from an inventory path
+ by doing the following:
+
+     $ govc ls -i /dc/datastore/terraform-test
+     Datastore:datastore-123
+
+ To locate host IDs, it might be a good idea to supply the `-l` flag as well so
+ that you can line up the names with the IDs:
+
+     $ govc ls -l -i /dc/host/cluster1
+     ResourcePool:resgroup-10 /dc/host/cluster1/Resources
+     HostSystem:host-10 /dc/host/cluster1/esxi1
+     HostSystem:host-11 /dc/host/cluster1/esxi2
+     HostSystem:host-12 /dc/host/cluster1/esxi3
+
  :param str resource_name: The name of the resource.
  :param VmfsDatastoreArgs args: The arguments to use to populate this resource's properties.
  :param pulumi.ResourceOptions opts: Options for the resource.
pulumi_vsphere/vnic.py CHANGED
@@ -433,13 +433,18 @@ class Vnic(pulumi.CustomResource):
  ])
  ```

- ## Importing
+ ## Import
+
+ An existing vNic can be imported into this resource

- An existing vNic can be [imported][docs-import] into this resource
  via supplying the vNic's ID. An example is below:

  [docs-import]: /docs/import/index.html

+ ```sh
+ $ pulumi import vsphere:index/vnic:Vnic vnic host-123_vmk2
+ ```
+
  The above would import the vnic `vmk2` from host with ID `host-123`.

  :param str resource_name: The name of the resource.
@@ -530,13 +535,18 @@ class Vnic(pulumi.CustomResource):
  ])
  ```

- ## Importing
+ ## Import
+
+ An existing vNic can be imported into this resource

- An existing vNic can be [imported][docs-import] into this resource
  via supplying the vNic's ID. An example is below:

  [docs-import]: /docs/import/index.html

+ ```sh
+ $ pulumi import vsphere:index/vnic:Vnic vnic host-123_vmk2
+ ```
+
  The above would import the vnic `vmk2` from host with ID `host-123`.

  :param str resource_name: The name of the resource.
{pulumi_vsphere-4.12.0a1727221820.dist-info → pulumi_vsphere-4.12.0a1727848995.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: pulumi_vsphere
- Version: 4.12.0a1727221820
+ Version: 4.12.0a1727848995
  Summary: A Pulumi package for creating vsphere resources
  License: Apache-2.0
  Project-URL: Homepage, https://pulumi.io