pulumi-vsphere 4.12.0a1727221820__py3-none-any.whl → 4.12.0a1727848995__py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.

Note: this version of pulumi-vsphere has been flagged as potentially problematic.

Files changed (40)
  1. pulumi_vsphere/compute_cluster.py +176 -2
  2. pulumi_vsphere/compute_cluster_host_group.py +62 -2
  3. pulumi_vsphere/compute_cluster_vm_affinity_rule.py +24 -2
  4. pulumi_vsphere/compute_cluster_vm_anti_affinity_rule.py +36 -2
  5. pulumi_vsphere/compute_cluster_vm_dependency_rule.py +200 -2
  6. pulumi_vsphere/compute_cluster_vm_group.py +158 -2
  7. pulumi_vsphere/compute_cluster_vm_host_rule.py +68 -2
  8. pulumi_vsphere/content_library.py +98 -2
  9. pulumi_vsphere/content_library_item.py +138 -2
  10. pulumi_vsphere/custom_attribute.py +72 -2
  11. pulumi_vsphere/datacenter.py +14 -4
  12. pulumi_vsphere/datastore_cluster.py +58 -2
  13. pulumi_vsphere/datastore_cluster_vm_anti_affinity_rule.py +154 -2
  14. pulumi_vsphere/distributed_port_group.py +280 -2
  15. pulumi_vsphere/distributed_virtual_switch.py +256 -2
  16. pulumi_vsphere/dpm_host_override.py +58 -2
  17. pulumi_vsphere/drs_vm_override.py +62 -2
  18. pulumi_vsphere/folder.py +136 -2
  19. pulumi_vsphere/get_compute_cluster_host_group.py +2 -2
  20. pulumi_vsphere/ha_vm_override.py +158 -2
  21. pulumi_vsphere/host.py +250 -2
  22. pulumi_vsphere/host_port_group.py +12 -2
  23. pulumi_vsphere/host_virtual_switch.py +64 -2
  24. pulumi_vsphere/nas_datastore.py +62 -2
  25. pulumi_vsphere/pulumi-plugin.json +1 -1
  26. pulumi_vsphere/resource_pool.py +4 -16
  27. pulumi_vsphere/role.py +28 -2
  28. pulumi_vsphere/storage_drs_vm_override.py +128 -2
  29. pulumi_vsphere/tag.py +154 -2
  30. pulumi_vsphere/tag_category.py +78 -2
  31. pulumi_vsphere/vapp_container.py +158 -2
  32. pulumi_vsphere/vapp_entity.py +142 -2
  33. pulumi_vsphere/virtual_disk.py +76 -2
  34. pulumi_vsphere/virtual_machine.py +48 -2
  35. pulumi_vsphere/vmfs_datastore.py +266 -2
  36. pulumi_vsphere/vnic.py +14 -4
  37. {pulumi_vsphere-4.12.0a1727221820.dist-info → pulumi_vsphere-4.12.0a1727848995.dist-info}/METADATA +1 -1
  38. {pulumi_vsphere-4.12.0a1727221820.dist-info → pulumi_vsphere-4.12.0a1727848995.dist-info}/RECORD +40 -40
  39. {pulumi_vsphere-4.12.0a1727221820.dist-info → pulumi_vsphere-4.12.0a1727848995.dist-info}/WHEEL +0 -0
  40. {pulumi_vsphere-4.12.0a1727221820.dist-info → pulumi_vsphere-4.12.0a1727848995.dist-info}/top_level.txt +0 -0
pulumi_vsphere/distributed_virtual_switch.py CHANGED
@@ -3239,7 +3239,134 @@ class DistributedVirtualSwitch(pulumi.CustomResource):
  vsan_share_level: Optional[pulumi.Input[str]] = None,
  __props__=None):
  """
- Create a DistributedVirtualSwitch resource with the given unique name, props, and options.
+ The `DistributedVirtualSwitch` resource can be used to manage vSphere
+ Distributed Switches (VDS).
+
+ An essential component of a distributed, scalable vSphere infrastructure, the
+ VDS provides centralized management and monitoring of the networking
+ configuration for all the hosts that are associated with the switch.
+ In addition to adding distributed port groups
+ (see the `DistributedPortGroup` resource)
+ that can be used as networks for virtual machines, a VDS can be configured to
+ perform advanced high availability, traffic shaping, network monitoring, etc.
+
+ For an overview of vSphere networking concepts, see
+ [this page][ref-vsphere-net-concepts].
+
+ For more information on the VDS, see [this page][ref-vsphere-vds].
+
+ [ref-vsphere-net-concepts]: https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.networking.doc/GUID-2B11DBB8-CB3C-4AFF-8885-EFEA0FC562F4.html
+ [ref-vsphere-vds]: https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.networking.doc/GUID-375B45C7-684C-4C51-BA3C-70E48DFABF04.html
+
+ > **NOTE:** This resource requires vCenter and is not available on
+ direct ESXi host connections.
+
+ ## Example Usage
+
+ The following example demonstrates a standard configuration of a
+ VDS in a 3-node vSphere datacenter named `dc-01`, across 4 NICs with two
+ used as active uplinks and two as standby. Note that the NIC failover order
+ propagates to any port groups configured on this VDS and can be overridden.
+
+ ```python
+ import pulumi
+ import pulumi_vsphere as vsphere
+
+ config = pulumi.Config()
+ hosts = config.get_object("hosts")
+ if hosts is None:
+     hosts = [
+         "esxi-01.example.com",
+         "esxi-02.example.com",
+         "esxi-03.example.com",
+     ]
+ network_interfaces = config.get_object("networkInterfaces")
+ if network_interfaces is None:
+     network_interfaces = [
+         "vmnic0",
+         "vmnic1",
+         "vmnic2",
+         "vmnic3",
+     ]
+ datacenter = vsphere.get_datacenter(name="dc-01")
+ host = [vsphere.get_host(name=hosts[__index],
+     datacenter_id=datacenter.id) for __index in range(len(hosts))]
+ vds = vsphere.DistributedVirtualSwitch("vds",
+     name="vds-01",
+     datacenter_id=datacenter.id,
+     uplinks=[
+         "uplink1",
+         "uplink2",
+         "uplink3",
+         "uplink4",
+     ],
+     active_uplinks=[
+         "uplink1",
+         "uplink2",
+     ],
+     standby_uplinks=[
+         "uplink3",
+         "uplink4",
+     ],
+     hosts=[
+         {
+             "host_system_id": host[0].id,
+             "devices": network_interfaces,
+         },
+         {
+             "host_system_id": host[1].id,
+             "devices": network_interfaces,
+         },
+         {
+             "host_system_id": host[2].id,
+             "devices": network_interfaces,
+         },
+     ])
+ ```
+
+ ### Uplink name and count control
+
+ The following abridged example demonstrates how to manage the number
+ of uplinks, and the names of the uplinks, via the `uplinks` parameter.
+
+ Note that if you change the uplink naming and count after creating the VDS, you
+ may need to explicitly specify `active_uplinks` and `standby_uplinks`, as these
+ values are saved to state after creation regardless of whether they were
+ specified in the configuration, and will otherwise drift and cause errors.
+
+ ```python
+ import pulumi
+ import pulumi_vsphere as vsphere
+
+ vds = vsphere.DistributedVirtualSwitch("vds",
+     name="vds-01",
+     datacenter_id=datacenter["id"],
+     uplinks=[
+         "uplink1",
+         "uplink2",
+     ],
+     active_uplinks=["uplink1"],
+     standby_uplinks=["uplink2"])
+ ```
+
+ > **NOTE:** The default uplink names when a VDS is created are `uplink1`
+ through `uplink4`; however, this default is not guaranteed to be stable and
+ you are encouraged to set your own.
+
+ ## Import
+
+ An existing VDS can be imported into this resource via the path
+
+ to the VDS, using the following command:
+
+ ```sh
+ $ pulumi import vsphere:index/distributedVirtualSwitch:DistributedVirtualSwitch vds /dc-01/network/vds-01
+ ```
+
+ The above would import the VDS named `vds-01` that is located in the `dc-01`
+
+ datacenter.
+
  :param str resource_name: The name of the resource.
  :param pulumi.ResourceOptions opts: Options for the resource.
  :param pulumi.Input[Sequence[pulumi.Input[str]]] active_uplinks: List of active uplinks used for load balancing, matching the names of the uplinks assigned in the DVS.
@@ -3365,7 +3492,134 @@ class DistributedVirtualSwitch(pulumi.CustomResource):
  args: DistributedVirtualSwitchArgs,
  opts: Optional[pulumi.ResourceOptions] = None):
  """
- Create a DistributedVirtualSwitch resource with the given unique name, props, and options.
+ The `DistributedVirtualSwitch` resource can be used to manage vSphere
+ Distributed Switches (VDS).
+
+ An essential component of a distributed, scalable vSphere infrastructure, the
+ VDS provides centralized management and monitoring of the networking
+ configuration for all the hosts that are associated with the switch.
+ In addition to adding distributed port groups
+ (see the `DistributedPortGroup` resource)
+ that can be used as networks for virtual machines, a VDS can be configured to
+ perform advanced high availability, traffic shaping, network monitoring, etc.
+
+ For an overview of vSphere networking concepts, see
+ [this page][ref-vsphere-net-concepts].
+
+ For more information on the VDS, see [this page][ref-vsphere-vds].
+
+ [ref-vsphere-net-concepts]: https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.networking.doc/GUID-2B11DBB8-CB3C-4AFF-8885-EFEA0FC562F4.html
+ [ref-vsphere-vds]: https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.networking.doc/GUID-375B45C7-684C-4C51-BA3C-70E48DFABF04.html
+
+ > **NOTE:** This resource requires vCenter and is not available on
+ direct ESXi host connections.
+
+ ## Example Usage
+
+ The following example demonstrates a standard configuration of a
+ VDS in a 3-node vSphere datacenter named `dc-01`, across 4 NICs with two
+ used as active uplinks and two as standby. Note that the NIC failover order
+ propagates to any port groups configured on this VDS and can be overridden.
+
+ ```python
+ import pulumi
+ import pulumi_vsphere as vsphere
+
+ config = pulumi.Config()
+ hosts = config.get_object("hosts")
+ if hosts is None:
+     hosts = [
+         "esxi-01.example.com",
+         "esxi-02.example.com",
+         "esxi-03.example.com",
+     ]
+ network_interfaces = config.get_object("networkInterfaces")
+ if network_interfaces is None:
+     network_interfaces = [
+         "vmnic0",
+         "vmnic1",
+         "vmnic2",
+         "vmnic3",
+     ]
+ datacenter = vsphere.get_datacenter(name="dc-01")
+ host = [vsphere.get_host(name=hosts[__index],
+     datacenter_id=datacenter.id) for __index in range(len(hosts))]
+ vds = vsphere.DistributedVirtualSwitch("vds",
+     name="vds-01",
+     datacenter_id=datacenter.id,
+     uplinks=[
+         "uplink1",
+         "uplink2",
+         "uplink3",
+         "uplink4",
+     ],
+     active_uplinks=[
+         "uplink1",
+         "uplink2",
+     ],
+     standby_uplinks=[
+         "uplink3",
+         "uplink4",
+     ],
+     hosts=[
+         {
+             "host_system_id": host[0].id,
+             "devices": network_interfaces,
+         },
+         {
+             "host_system_id": host[1].id,
+             "devices": network_interfaces,
+         },
+         {
+             "host_system_id": host[2].id,
+             "devices": network_interfaces,
+         },
+     ])
+ ```
+
+ ### Uplink name and count control
+
+ The following abridged example demonstrates how to manage the number
+ of uplinks, and the names of the uplinks, via the `uplinks` parameter.
+
+ Note that if you change the uplink naming and count after creating the VDS, you
+ may need to explicitly specify `active_uplinks` and `standby_uplinks`, as these
+ values are saved to state after creation regardless of whether they were
+ specified in the configuration, and will otherwise drift and cause errors.
+
+ ```python
+ import pulumi
+ import pulumi_vsphere as vsphere
+
+ vds = vsphere.DistributedVirtualSwitch("vds",
+     name="vds-01",
+     datacenter_id=datacenter["id"],
+     uplinks=[
+         "uplink1",
+         "uplink2",
+     ],
+     active_uplinks=["uplink1"],
+     standby_uplinks=["uplink2"])
+ ```
+
+ > **NOTE:** The default uplink names when a VDS is created are `uplink1`
+ through `uplink4`; however, this default is not guaranteed to be stable and
+ you are encouraged to set your own.
+
+ ## Import
+
+ An existing VDS can be imported into this resource via the path
+
+ to the VDS, using the following command:
+
+ ```sh
+ $ pulumi import vsphere:index/distributedVirtualSwitch:DistributedVirtualSwitch vds /dc-01/network/vds-01
+ ```
+
+ The above would import the VDS named `vds-01` that is located in the `dc-01`
+
+ datacenter.
+
  :param str resource_name: The name of the resource.
  :param DistributedVirtualSwitchArgs args: The arguments to use to populate this resource's properties.
  :param pulumi.ResourceOptions opts: Options for the resource.
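
The docstring added above refers readers to the `DistributedPortGroup` resource for attaching virtual machine networks to the switch but does not show one. A minimal sketch follows; the `distributed_virtual_switch_uuid` and `vlan_id` argument names, and the reuse of the `vds` resource from the example above, are assumptions rather than something this diff confirms.

```python
import pulumi
import pulumi_vsphere as vsphere

# Assumes the `vds` switch from the docstring example above is defined in the
# same program; the argument names below are assumptions based on the upstream
# provider schema and are not confirmed by this diff.
pg = vsphere.DistributedPortGroup("pg",
    name="pg-vlan-100",
    distributed_virtual_switch_uuid=vds.id,  # attach the port group to the VDS
    vlan_id=100)                             # tag its traffic for VLAN 100
```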
pulumi_vsphere/dpm_host_override.py CHANGED
@@ -200,7 +200,35 @@ class DpmHostOverride(pulumi.CustomResource):
  host_system_id: Optional[pulumi.Input[str]] = None,
  __props__=None):
  """
- Create a DpmHostOverride resource with the given unique name, props, and options.
+ The `DpmHostOverride` resource can be used to add a DPM override to a
+ cluster for a particular host. This allows you to control the power management
+ settings for individual hosts in the cluster while leaving any unspecified ones
+ at the default power management settings.
+
+ For more information on DPM within vSphere clusters, see [this
+ page][ref-vsphere-cluster-dpm].
+
+ [ref-vsphere-cluster-dpm]: https://docs.vmware.com/en/VMware-vSphere/8.0/vsphere-resource-management/GUID-5E5E349A-4644-4C9C-B434-1C0243EBDC80.html
+
+ > **NOTE:** This resource requires vCenter and is not available on direct ESXi
+ connections.
+
+ ## Import
+
+ An existing override can be imported into this resource by
+
+ supplying both the path to the cluster, and the path to the host, to `pulumi
+
+ import`. If no override exists, an error will be given. An example is below:
+
+ ```sh
+ $ pulumi import vsphere:index/dpmHostOverride:DpmHostOverride dpm_host_override \\
+ ```
+
+ '{"compute_cluster_path": "/dc1/host/cluster1", \\
+
+ "host_path": "/dc1/host/esxi1"}'
+
  :param str resource_name: The name of the resource.
  :param pulumi.ResourceOptions opts: Options for the resource.
  :param pulumi.Input[str] compute_cluster_id: The managed object reference
@@ -224,7 +252,35 @@ class DpmHostOverride(pulumi.CustomResource):
  args: DpmHostOverrideArgs,
  opts: Optional[pulumi.ResourceOptions] = None):
  """
- Create a DpmHostOverride resource with the given unique name, props, and options.
+ The `DpmHostOverride` resource can be used to add a DPM override to a
+ cluster for a particular host. This allows you to control the power management
+ settings for individual hosts in the cluster while leaving any unspecified ones
+ at the default power management settings.
+
+ For more information on DPM within vSphere clusters, see [this
+ page][ref-vsphere-cluster-dpm].
+
+ [ref-vsphere-cluster-dpm]: https://docs.vmware.com/en/VMware-vSphere/8.0/vsphere-resource-management/GUID-5E5E349A-4644-4C9C-B434-1C0243EBDC80.html
+
+ > **NOTE:** This resource requires vCenter and is not available on direct ESXi
+ connections.
+
+ ## Import
+
+ An existing override can be imported into this resource by
+
+ supplying both the path to the cluster, and the path to the host, to `pulumi
+
+ import`. If no override exists, an error will be given. An example is below:
+
+ ```sh
+ $ pulumi import vsphere:index/dpmHostOverride:DpmHostOverride dpm_host_override \\
+ ```
+
+ '{"compute_cluster_path": "/dc1/host/cluster1", \\
+
+ "host_path": "/dc1/host/esxi1"}'
+
  :param str resource_name: The name of the resource.
  :param DpmHostOverrideArgs args: The arguments to use to populate this resource's properties.
  :param pulumi.ResourceOptions opts: Options for the resource.
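
The docstring added in the two hunks above explains what a DPM override does and how to import one, but includes no usage snippet. The sketch below is a hedged illustration: `compute_cluster_id` and `host_system_id` appear in the parameter docs above, while the `get_compute_cluster` lookup, the `dpm_enabled` and `dpm_automation_level` argument names, and the inventory names are assumptions.

```python
import pulumi
import pulumi_vsphere as vsphere

# Look up the datacenter, cluster, and host the override applies to.
# The inventory names ("dc-01", "cluster-01", "esxi-01.example.com") are placeholders.
datacenter = vsphere.get_datacenter(name="dc-01")
cluster = vsphere.get_compute_cluster(name="cluster-01",
    datacenter_id=datacenter.id)
host = vsphere.get_host(name="esxi-01.example.com",
    datacenter_id=datacenter.id)

# Override DPM for this single host while the rest of the cluster keeps its
# defaults; `dpm_enabled` and `dpm_automation_level` are assumed argument names.
dpm_host_override = vsphere.DpmHostOverride("dpm_host_override",
    compute_cluster_id=cluster.id,
    host_system_id=host.id,
    dpm_enabled=True,
    dpm_automation_level="automated")
```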
pulumi_vsphere/drs_vm_override.py CHANGED
@@ -204,7 +204,37 @@ class DrsVmOverride(pulumi.CustomResource):
  virtual_machine_id: Optional[pulumi.Input[str]] = None,
  __props__=None):
  """
- Create a DrsVmOverride resource with the given unique name, props, and options.
+ The `DrsVmOverride` resource can be used to add a DRS override to a
+ cluster for a specific virtual machine. With this resource, one can enable or
+ disable DRS and control the automation level for a single virtual machine
+ without affecting the rest of the cluster.
+
+ For more information on vSphere clusters and DRS, see [this
+ page][ref-vsphere-drs-clusters].
+
+ [ref-vsphere-drs-clusters]: https://docs.vmware.com/en/VMware-vSphere/8.0/vsphere-resource-management/GUID-8ACF3502-5314-469F-8CC9-4A9BD5925BC2.html
+
+ > **NOTE:** This resource requires vCenter and is not available on direct ESXi
+ connections.
+
+ ## Import
+
+ An existing override can be imported into this resource by
+
+ supplying both the path to the cluster, and the path to the virtual machine, to
+
+ `pulumi import`. If no override exists, an error will be given. An example
+
+ is below:
+
+ ```sh
+ $ pulumi import vsphere:index/drsVmOverride:DrsVmOverride drs_vm_override \\
+ ```
+
+ '{"compute_cluster_path": "/dc1/host/cluster1", \\
+
+ "virtual_machine_path": "/dc1/vm/srv1"}'
+
  :param str resource_name: The name of the resource.
  :param pulumi.ResourceOptions opts: Options for the resource.
  :param pulumi.Input[str] compute_cluster_id: The managed object reference
@@ -229,7 +259,37 @@ class DrsVmOverride(pulumi.CustomResource):
  args: DrsVmOverrideArgs,
  opts: Optional[pulumi.ResourceOptions] = None):
  """
- Create a DrsVmOverride resource with the given unique name, props, and options.
+ The `DrsVmOverride` resource can be used to add a DRS override to a
+ cluster for a specific virtual machine. With this resource, one can enable or
+ disable DRS and control the automation level for a single virtual machine
+ without affecting the rest of the cluster.
+
+ For more information on vSphere clusters and DRS, see [this
+ page][ref-vsphere-drs-clusters].
+
+ [ref-vsphere-drs-clusters]: https://docs.vmware.com/en/VMware-vSphere/8.0/vsphere-resource-management/GUID-8ACF3502-5314-469F-8CC9-4A9BD5925BC2.html
+
+ > **NOTE:** This resource requires vCenter and is not available on direct ESXi
+ connections.
+
+ ## Import
+
+ An existing override can be imported into this resource by
+
+ supplying both the path to the cluster, and the path to the virtual machine, to
+
+ `pulumi import`. If no override exists, an error will be given. An example
+
+ is below:
+
+ ```sh
+ $ pulumi import vsphere:index/drsVmOverride:DrsVmOverride drs_vm_override \\
+ ```
+
+ '{"compute_cluster_path": "/dc1/host/cluster1", \\
+
+ "virtual_machine_path": "/dc1/vm/srv1"}'
+
  :param str resource_name: The name of the resource.
  :param DrsVmOverrideArgs args: The arguments to use to populate this resource's properties.
  :param pulumi.ResourceOptions opts: Options for the resource.
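
As with the DPM override above, the new docstring describes the resource and its import form but carries no usage example. A hedged sketch under similar assumptions follows: only `compute_cluster_id` and `virtual_machine_id` are confirmed by the parameter docs in this diff; `get_virtual_machine`, `drs_enabled`, `drs_automation_level`, and the inventory names are assumptions.

```python
import pulumi
import pulumi_vsphere as vsphere

# Placeholder inventory names; adjust to the target environment.
datacenter = vsphere.get_datacenter(name="dc-01")
cluster = vsphere.get_compute_cluster(name="cluster-01",
    datacenter_id=datacenter.id)
vm = vsphere.get_virtual_machine(name="srv1",
    datacenter_id=datacenter.id)

# Pin DRS behavior for one virtual machine without changing the cluster default;
# `drs_enabled` and `drs_automation_level` are assumed argument names.
drs_vm_override = vsphere.DrsVmOverride("drs_vm_override",
    compute_cluster_id=cluster.id,
    virtual_machine_id=vm.id,
    drs_enabled=True,
    drs_automation_level="manual")
```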
pulumi_vsphere/folder.py CHANGED
@@ -285,7 +285,74 @@ class Folder(pulumi.CustomResource):
  type: Optional[pulumi.Input[str]] = None,
  __props__=None):
  """
- Create a Folder resource with the given unique name, props, and options.
+ The `Folder` resource can be used to manage vSphere inventory folders.
+ The resource supports creating folders of the 5 major types - datacenter
+ folders, host and cluster folders, virtual machine folders, storage folders,
+ and network folders.
+
+ Paths are always relative to the specific type of folder you are creating.
+ A subfolder is discovered by parsing the relative path specified in `path`, so
+ `foo/bar` will create a folder named `bar` in the parent folder `foo`, as long
+ as that folder exists.
+
+ ## Example Usage
+
+ The basic example below creates a virtual machine folder named
+ `test-folder` in the default datacenter's VM hierarchy.
+
+ ```python
+ import pulumi
+ import pulumi_vsphere as vsphere
+
+ datacenter = vsphere.get_datacenter()
+ folder = vsphere.Folder("folder",
+     path="test-folder",
+     type="vm",
+     datacenter_id=datacenter.id)
+ ```
+
+ ### Example with subfolders
+
+ The below example builds off of the above by first creating a folder named
+ `test-parent`, and then locating `test-folder` in that
+ folder. To ensure the parent is created first, we create an interpolation
+ dependency off the parent's `path` attribute.
+
+ Note that if you change parents (for example, went from the above basic
+ configuration to this one), your folder will be moved to be under the correct
+ parent.
+
+ ```python
+ import pulumi
+ import pulumi_vsphere as vsphere
+
+ datacenter = vsphere.get_datacenter()
+ parent = vsphere.Folder("parent",
+     path="test-parent",
+     type="vm",
+     datacenter_id=datacenter.id)
+ folder = vsphere.Folder("folder",
+     path=parent.path.apply(lambda path: f"{path}/test-folder"),
+     type="vm",
+     datacenter_id=datacenter.id)
+ ```
+
+ ## Import
+
+ An existing folder can be imported into this resource via
+
+ its full path, via the following command:
+
+ ```sh
+ $ pulumi import vsphere:index/folder:Folder folder /default-dc/vm/terraform-test-folder
+ ```
+
+ The above command would import the folder from our examples above, the VM
+
+ folder named `terraform-test-folder` located in the datacenter named
+
+ `default-dc`.
+
  :param str resource_name: The name of the resource.
  :param pulumi.ResourceOptions opts: Options for the resource.
  :param pulumi.Input[Mapping[str, pulumi.Input[str]]] custom_attributes: Map of custom attribute ids to attribute
@@ -323,7 +390,74 @@ class Folder(pulumi.CustomResource):
  args: FolderArgs,
  opts: Optional[pulumi.ResourceOptions] = None):
  """
- Create a Folder resource with the given unique name, props, and options.
+ The `Folder` resource can be used to manage vSphere inventory folders.
+ The resource supports creating folders of the 5 major types - datacenter
+ folders, host and cluster folders, virtual machine folders, storage folders,
+ and network folders.
+
+ Paths are always relative to the specific type of folder you are creating.
+ A subfolder is discovered by parsing the relative path specified in `path`, so
+ `foo/bar` will create a folder named `bar` in the parent folder `foo`, as long
+ as that folder exists.
+
+ ## Example Usage
+
+ The basic example below creates a virtual machine folder named
+ `test-folder` in the default datacenter's VM hierarchy.
+
+ ```python
+ import pulumi
+ import pulumi_vsphere as vsphere
+
+ datacenter = vsphere.get_datacenter()
+ folder = vsphere.Folder("folder",
+     path="test-folder",
+     type="vm",
+     datacenter_id=datacenter.id)
+ ```
+
+ ### Example with subfolders
+
+ The below example builds off of the above by first creating a folder named
+ `test-parent`, and then locating `test-folder` in that
+ folder. To ensure the parent is created first, we create an interpolation
+ dependency off the parent's `path` attribute.
+
+ Note that if you change parents (for example, went from the above basic
+ configuration to this one), your folder will be moved to be under the correct
+ parent.
+
+ ```python
+ import pulumi
+ import pulumi_vsphere as vsphere
+
+ datacenter = vsphere.get_datacenter()
+ parent = vsphere.Folder("parent",
+     path="test-parent",
+     type="vm",
+     datacenter_id=datacenter.id)
+ folder = vsphere.Folder("folder",
+     path=parent.path.apply(lambda path: f"{path}/test-folder"),
+     type="vm",
+     datacenter_id=datacenter.id)
+ ```
+
+ ## Import
+
+ An existing folder can be imported into this resource via
+
+ its full path, via the following command:
+
+ ```sh
+ $ pulumi import vsphere:index/folder:Folder folder /default-dc/vm/terraform-test-folder
+ ```
+
+ The above command would import the folder from our examples above, the VM
+
+ folder named `terraform-test-folder` located in the datacenter named
+
+ `default-dc`.
+
  :param str resource_name: The name of the resource.
  :param FolderArgs args: The arguments to use to populate this resource's properties.
  :param pulumi.ResourceOptions opts: Options for the resource.
pulumi_vsphere/get_compute_cluster_host_group.py CHANGED
@@ -96,7 +96,7 @@ def get_compute_cluster_host_group(compute_cluster_id: Optional[str] = None,
  compute_cluster_id=cluster.id)
  host_rule = vsphere.ComputeClusterVmHostRule("host_rule",
  compute_cluster_id=cluster.id,
- name="terraform-host-rule1",
+ name="pulumi-host-rule1",
  vm_group_name="vmgroup-01",
  affinity_host_group_name=host_group.name)
  ```
@@ -144,7 +144,7 @@ def get_compute_cluster_host_group_output(compute_cluster_id: Optional[pulumi.In
  compute_cluster_id=cluster.id)
  host_rule = vsphere.ComputeClusterVmHostRule("host_rule",
  compute_cluster_id=cluster.id,
- name="terraform-host-rule1",
+ name="pulumi-host-rule1",
  vm_group_name="vmgroup-01",
  affinity_host_group_name=host_group.name)
  ```
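
The two hunks above show only the renamed rule inside the docstring example. For context, a minimal sketch of the surrounding lookup and rule is below; the `vsphere.ComputeClusterVmHostRule` arguments and `host_group.name` are taken from the context lines in the diff, while the `get_compute_cluster` and `get_compute_cluster_host_group` lookups and the inventory names are assumptions.

```python
import pulumi
import pulumi_vsphere as vsphere

# Placeholder inventory names; only the ComputeClusterVmHostRule arguments and
# get_compute_cluster_host_group's compute_cluster_id appear in the diff above.
datacenter = vsphere.get_datacenter(name="dc-01")
cluster = vsphere.get_compute_cluster(name="cluster-01",
    datacenter_id=datacenter.id)
host_group = vsphere.get_compute_cluster_host_group(name="hostgroup-01",
    compute_cluster_id=cluster.id)
host_rule = vsphere.ComputeClusterVmHostRule("host_rule",
    compute_cluster_id=cluster.id,
    name="pulumi-host-rule1",
    vm_group_name="vmgroup-01",
    affinity_host_group_name=host_group.name)
```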