pulumi-vsphere 4.10.0a1710245029-py3-none-any.whl → 4.10.2-py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as published to their respective public registries. It is provided for informational purposes only.
- pulumi_vsphere/__init__.py +28 -0
- pulumi_vsphere/_inputs.py +566 -236
- pulumi_vsphere/_utilities.py +35 -0
- pulumi_vsphere/compute_cluster.py +747 -1477
- pulumi_vsphere/compute_cluster_vm_affinity_rule.py +28 -20
- pulumi_vsphere/datacenter.py +33 -40
- pulumi_vsphere/datastore_cluster.py +154 -364
- pulumi_vsphere/distributed_port_group.py +126 -182
- pulumi_vsphere/distributed_virtual_switch.py +301 -819
- pulumi_vsphere/entity_permissions.py +56 -35
- pulumi_vsphere/file.py +16 -24
- pulumi_vsphere/folder.py +7 -28
- pulumi_vsphere/get_compute_cluster.py +0 -4
- pulumi_vsphere/get_compute_cluster_host_group.py +20 -20
- pulumi_vsphere/get_content_library.py +10 -10
- pulumi_vsphere/get_content_library_item.py +12 -8
- pulumi_vsphere/get_custom_attribute.py +0 -4
- pulumi_vsphere/get_datacenter.py +0 -4
- pulumi_vsphere/get_datastore.py +9 -13
- pulumi_vsphere/get_datastore_cluster.py +0 -4
- pulumi_vsphere/get_datastore_stats.py +38 -44
- pulumi_vsphere/get_distributed_virtual_switch.py +2 -4
- pulumi_vsphere/get_dynamic.py +18 -20
- pulumi_vsphere/get_folder.py +10 -6
- pulumi_vsphere/get_guest_os_customization.py +8 -47
- pulumi_vsphere/get_host.py +0 -4
- pulumi_vsphere/get_host_base_images.py +97 -0
- pulumi_vsphere/get_host_pci_device.py +8 -14
- pulumi_vsphere/get_host_thumbprint.py +12 -16
- pulumi_vsphere/get_host_vgpu_profile.py +4 -10
- pulumi_vsphere/get_license.py +2 -5
- pulumi_vsphere/get_network.py +14 -18
- pulumi_vsphere/get_policy.py +0 -4
- pulumi_vsphere/get_resource_pool.py +14 -18
- pulumi_vsphere/get_role.py +4 -8
- pulumi_vsphere/get_tag.py +0 -4
- pulumi_vsphere/get_tag_category.py +0 -4
- pulumi_vsphere/get_vapp_container.py +0 -4
- pulumi_vsphere/get_virtual_machine.py +58 -41
- pulumi_vsphere/get_vmfs_disks.py +0 -4
- pulumi_vsphere/guest_os_customization.py +50 -0
- pulumi_vsphere/ha_vm_override.py +189 -378
- pulumi_vsphere/host.py +0 -20
- pulumi_vsphere/host_port_group.py +12 -24
- pulumi_vsphere/host_virtual_switch.py +140 -287
- pulumi_vsphere/license.py +0 -32
- pulumi_vsphere/nas_datastore.py +7 -7
- pulumi_vsphere/offline_software_depot.py +180 -0
- pulumi_vsphere/outputs.py +591 -270
- pulumi_vsphere/provider.py +2 -6
- pulumi_vsphere/pulumi-plugin.json +2 -1
- pulumi_vsphere/resource_pool.py +50 -24
- pulumi_vsphere/supervisor.py +962 -0
- pulumi_vsphere/virtual_disk.py +14 -20
- pulumi_vsphere/virtual_machine.py +580 -809
- pulumi_vsphere/virtual_machine_class.py +442 -0
- pulumi_vsphere/virtual_machine_snapshot.py +8 -12
- pulumi_vsphere/vm_storage_policy.py +74 -86
- pulumi_vsphere/vnic.py +61 -77
- {pulumi_vsphere-4.10.0a1710245029.dist-info → pulumi_vsphere-4.10.2.dist-info}/METADATA +1 -1
- pulumi_vsphere-4.10.2.dist-info/RECORD +86 -0
- {pulumi_vsphere-4.10.0a1710245029.dist-info → pulumi_vsphere-4.10.2.dist-info}/WHEEL +1 -1
- pulumi_vsphere-4.10.0a1710245029.dist-info/RECORD +0 -82
- {pulumi_vsphere-4.10.0a1710245029.dist-info → pulumi_vsphere-4.10.2.dist-info}/top_level.txt +0 -0
pulumi_vsphere/vm_storage_policy.py
CHANGED

@@ -144,7 +144,6 @@ class VmStoragePolicy(pulumi.CustomResource):
 
 In this example, tags are first applied to datastores.
 
-<!--Start PulumiCodeChooser -->
 ```python
 import pulumi
 import pulumi_vsphere as vsphere
@@ -152,98 +151,93 @@ class VmStoragePolicy(pulumi.CustomResource):
 environment = vsphere.get_tag_category(name="environment")
 service_level = vsphere.get_tag_category(name="service_level")
 replication = vsphere.get_tag_category(name="replication")
-production = vsphere.get_tag(
-
-development = vsphere.get_tag(
-
-platinum = vsphere.get_tag(
-
-gold = vsphere.get_tag(
-
-silver = vsphere.get_tag(
-
-bronze = vsphere.get_tag(
-
-replicated = vsphere.get_tag(
-
-non_replicated = vsphere.get_tag(
-
-prod_datastore = vsphere.VmfsDatastore("
+production = vsphere.get_tag(name="production",
+    category_id="data.vsphere_tag_category.environment.id")
+development = vsphere.get_tag(name="development",
+    category_id="data.vsphere_tag_category.environment.id")
+platinum = vsphere.get_tag(name="platinum",
+    category_id="data.vsphere_tag_category.service_level.id")
+gold = vsphere.get_tag(name="platinum",
+    category_id="data.vsphere_tag_category.service_level.id")
+silver = vsphere.get_tag(name="silver",
+    category_id="data.vsphere_tag_category.service_level.id")
+bronze = vsphere.get_tag(name="bronze",
+    category_id="data.vsphere_tag_category.service_level.id")
+replicated = vsphere.get_tag(name="replicated",
+    category_id="data.vsphere_tag_category.replication.id")
+non_replicated = vsphere.get_tag(name="non_replicated",
+    category_id="data.vsphere_tag_category.replication.id")
+prod_datastore = vsphere.VmfsDatastore("prod_datastore", tags=[
     "data.vsphere_tag.production.id",
     "data.vsphere_tag.platinum.id",
     "data.vsphere_tag.replicated.id",
 ])
-dev_datastore = vsphere.NasDatastore("
+dev_datastore = vsphere.NasDatastore("dev_datastore", tags=[
     "data.vsphere_tag.development.id",
     "data.vsphere_tag.silver.id",
     "data.vsphere_tag.non_replicated.id",
 ])
 ```
-<!--End PulumiCodeChooser -->
 
 Next, storage policies are created and `tag_rules` are applied.
 
-<!--Start PulumiCodeChooser -->
 ```python
 import pulumi
 import pulumi_vsphere as vsphere
 
-prod_platinum_replicated = vsphere.VmStoragePolicy("
+prod_platinum_replicated = vsphere.VmStoragePolicy("prod_platinum_replicated",
+    name="prod_platinum_replicated",
     description="prod_platinum_replicated",
     tag_rules=[
         vsphere.VmStoragePolicyTagRuleArgs(
-            tag_category=
-            tags=[
+            tag_category=environment["name"],
+            tags=[production["name"]],
             include_datastores_with_tags=True,
         ),
         vsphere.VmStoragePolicyTagRuleArgs(
-            tag_category=
-            tags=[
+            tag_category=service_level["name"],
+            tags=[platinum["name"]],
             include_datastores_with_tags=True,
         ),
         vsphere.VmStoragePolicyTagRuleArgs(
-            tag_category=
-            tags=[
+            tag_category=replication["name"],
+            tags=[replicated["name"]],
             include_datastores_with_tags=True,
         ),
     ])
-dev_silver_nonreplicated = vsphere.VmStoragePolicy("
+dev_silver_nonreplicated = vsphere.VmStoragePolicy("dev_silver_nonreplicated",
+    name="dev_silver_nonreplicated",
     description="dev_silver_nonreplicated",
     tag_rules=[
         vsphere.VmStoragePolicyTagRuleArgs(
-            tag_category=
-            tags=[
+            tag_category=environment["name"],
+            tags=[development["name"]],
             include_datastores_with_tags=True,
         ),
         vsphere.VmStoragePolicyTagRuleArgs(
-            tag_category=
-            tags=[
+            tag_category=service_level["name"],
+            tags=[silver["name"]],
             include_datastores_with_tags=True,
         ),
         vsphere.VmStoragePolicyTagRuleArgs(
-            tag_category=
-            tags=[
+            tag_category=replication["name"],
+            tags=[non_replicated["name"]],
             include_datastores_with_tags=True,
         ),
     ])
 ```
-<!--End PulumiCodeChooser -->
 
-
+Lastly, when creating a virtual machine resource, a storage policy can be specified to direct virtual machine placement to a datastore which matches the policy's `tags_rules`.
 
-<!--Start PulumiCodeChooser -->
 ```python
 import pulumi
 import pulumi_vsphere as vsphere
 
 prod_platinum_replicated = vsphere.get_policy(name="prod_platinum_replicated")
 dev_silver_nonreplicated = vsphere.get_policy(name="dev_silver_nonreplicated")
-prod_vm = vsphere.VirtualMachine("
-
-dev_vm = vsphere.VirtualMachine("devVm", storage_policy_id=data["vsphere_storage_policy"]["storage_policy"]["dev_silver_nonreplicated"]["id"])
-# ... other configuration ...
+prod_vm = vsphere.VirtualMachine("prod_vm", storage_policy_id=storage_policy["prodPlatinumReplicated"]["id"])
+dev_vm = vsphere.VirtualMachine("dev_vm", storage_policy_id=storage_policy["devSilverNonreplicated"]["id"])
 ```
-<!--End PulumiCodeChooser -->
 
 :param str resource_name: The name of the resource.
 :param pulumi.ResourceOptions opts: Options for the resource.
@@ -268,7 +262,6 @@ class VmStoragePolicy(pulumi.CustomResource):
 
 In this example, tags are first applied to datastores.
 
-<!--Start PulumiCodeChooser -->
 ```python
 import pulumi
 import pulumi_vsphere as vsphere
@@ -276,98 +269,93 @@ class VmStoragePolicy(pulumi.CustomResource):
 environment = vsphere.get_tag_category(name="environment")
 service_level = vsphere.get_tag_category(name="service_level")
 replication = vsphere.get_tag_category(name="replication")
-production = vsphere.get_tag(
-
-development = vsphere.get_tag(
-
-platinum = vsphere.get_tag(
-
-gold = vsphere.get_tag(
-
-silver = vsphere.get_tag(
-
-bronze = vsphere.get_tag(
-
-replicated = vsphere.get_tag(
-
-non_replicated = vsphere.get_tag(
-
-prod_datastore = vsphere.VmfsDatastore("
+production = vsphere.get_tag(name="production",
+    category_id="data.vsphere_tag_category.environment.id")
+development = vsphere.get_tag(name="development",
+    category_id="data.vsphere_tag_category.environment.id")
+platinum = vsphere.get_tag(name="platinum",
+    category_id="data.vsphere_tag_category.service_level.id")
+gold = vsphere.get_tag(name="platinum",
+    category_id="data.vsphere_tag_category.service_level.id")
+silver = vsphere.get_tag(name="silver",
+    category_id="data.vsphere_tag_category.service_level.id")
+bronze = vsphere.get_tag(name="bronze",
+    category_id="data.vsphere_tag_category.service_level.id")
+replicated = vsphere.get_tag(name="replicated",
+    category_id="data.vsphere_tag_category.replication.id")
+non_replicated = vsphere.get_tag(name="non_replicated",
+    category_id="data.vsphere_tag_category.replication.id")
+prod_datastore = vsphere.VmfsDatastore("prod_datastore", tags=[
     "data.vsphere_tag.production.id",
     "data.vsphere_tag.platinum.id",
     "data.vsphere_tag.replicated.id",
 ])
-dev_datastore = vsphere.NasDatastore("
+dev_datastore = vsphere.NasDatastore("dev_datastore", tags=[
     "data.vsphere_tag.development.id",
     "data.vsphere_tag.silver.id",
     "data.vsphere_tag.non_replicated.id",
 ])
 ```
-<!--End PulumiCodeChooser -->
 
 Next, storage policies are created and `tag_rules` are applied.
 
-<!--Start PulumiCodeChooser -->
 ```python
 import pulumi
 import pulumi_vsphere as vsphere
 
-prod_platinum_replicated = vsphere.VmStoragePolicy("
+prod_platinum_replicated = vsphere.VmStoragePolicy("prod_platinum_replicated",
+    name="prod_platinum_replicated",
     description="prod_platinum_replicated",
     tag_rules=[
         vsphere.VmStoragePolicyTagRuleArgs(
-            tag_category=
-            tags=[
+            tag_category=environment["name"],
+            tags=[production["name"]],
             include_datastores_with_tags=True,
         ),
         vsphere.VmStoragePolicyTagRuleArgs(
-            tag_category=
-            tags=[
+            tag_category=service_level["name"],
+            tags=[platinum["name"]],
             include_datastores_with_tags=True,
         ),
         vsphere.VmStoragePolicyTagRuleArgs(
-            tag_category=
-            tags=[
+            tag_category=replication["name"],
+            tags=[replicated["name"]],
             include_datastores_with_tags=True,
         ),
     ])
-dev_silver_nonreplicated = vsphere.VmStoragePolicy("
+dev_silver_nonreplicated = vsphere.VmStoragePolicy("dev_silver_nonreplicated",
+    name="dev_silver_nonreplicated",
     description="dev_silver_nonreplicated",
     tag_rules=[
         vsphere.VmStoragePolicyTagRuleArgs(
-            tag_category=
-            tags=[
+            tag_category=environment["name"],
+            tags=[development["name"]],
             include_datastores_with_tags=True,
         ),
         vsphere.VmStoragePolicyTagRuleArgs(
-            tag_category=
-            tags=[
+            tag_category=service_level["name"],
+            tags=[silver["name"]],
             include_datastores_with_tags=True,
         ),
         vsphere.VmStoragePolicyTagRuleArgs(
-            tag_category=
-            tags=[
+            tag_category=replication["name"],
+            tags=[non_replicated["name"]],
             include_datastores_with_tags=True,
         ),
     ])
 ```
-<!--End PulumiCodeChooser -->
 
-
+Lastly, when creating a virtual machine resource, a storage policy can be specified to direct virtual machine placement to a datastore which matches the policy's `tags_rules`.
 
-<!--Start PulumiCodeChooser -->
 ```python
 import pulumi
 import pulumi_vsphere as vsphere
 
 prod_platinum_replicated = vsphere.get_policy(name="prod_platinum_replicated")
 dev_silver_nonreplicated = vsphere.get_policy(name="dev_silver_nonreplicated")
-prod_vm = vsphere.VirtualMachine("
-
-dev_vm = vsphere.VirtualMachine("devVm", storage_policy_id=data["vsphere_storage_policy"]["storage_policy"]["dev_silver_nonreplicated"]["id"])
-# ... other configuration ...
+prod_vm = vsphere.VirtualMachine("prod_vm", storage_policy_id=storage_policy["prodPlatinumReplicated"]["id"])
+dev_vm = vsphere.VirtualMachine("dev_vm", storage_policy_id=storage_policy["devSilverNonreplicated"]["id"])
 ```
-<!--End PulumiCodeChooser -->
 
 :param str resource_name: The name of the resource.
 :param VmStoragePolicyArgs args: The arguments to use to populate this resource's properties.
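A note on the snippets above: both the old and the new generated docstrings still carry codegen residue — literal strings such as `"data.vsphere_tag_category.environment.id"` and the undefined `storage_policy[...]` lookup — where real IDs and outputs belong. Below is a minimal sketch of the same flow wired through actual data-source outputs, assuming a vCenter that already has an `environment` tag category containing a `production` tag (all names here are illustrative, not part of this release):

```python
import pulumi
import pulumi_vsphere as vsphere

# Look up the category first, then the tag inside it, passing real IDs
# rather than the literal "data.vsphere_tag_category..." strings.
environment = vsphere.get_tag_category(name="environment")
production = vsphere.get_tag(name="production", category_id=environment.id)

# A policy whose single tag_rule matches datastores tagged "production".
prod_policy = vsphere.VmStoragePolicy(
    "prod_policy",
    name="prod_policy",
    description="Placement on production-tagged datastores",
    tag_rules=[vsphere.VmStoragePolicyTagRuleArgs(
        tag_category=environment.name,
        tags=[production.name],
        include_datastores_with_tags=True,
    )],
)

# A VM would then consume the policy's ID directly; the other required
# VirtualMachine arguments (resource_pool_id, disks, and so on) are
# omitted from this sketch:
# vm = vsphere.VirtualMachine("vm", storage_policy_id=prod_policy.id, ...)
```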
pulumi_vsphere/vnic.py
CHANGED

@@ -30,7 +30,7 @@ class VnicArgs:
 The set of arguments for constructing a Vnic resource.
 :param pulumi.Input[str] host: ESX host the interface belongs to
 :param pulumi.Input[str] distributed_port_group: Key of the distributed portgroup the nic will connect to.
-:param pulumi.Input[str] distributed_switch_port: UUID of the
+:param pulumi.Input[str] distributed_switch_port: UUID of the vdswitch the nic will be attached to. Do not set if you set portgroup.
 :param pulumi.Input['VnicIpv4Args'] ipv4: IPv4 settings. Either this or `ipv6` needs to be set. See IPv4 options below.
 :param pulumi.Input['VnicIpv6Args'] ipv6: IPv6 settings. Either this or `ipv6` needs to be set. See IPv6 options below.
 :param pulumi.Input[str] mac: MAC address of the interface.
@@ -87,7 +87,7 @@ class VnicArgs:
 @pulumi.getter(name="distributedSwitchPort")
 def distributed_switch_port(self) -> Optional[pulumi.Input[str]]:
     """
-    UUID of the
+    UUID of the vdswitch the nic will be attached to. Do not set if you set portgroup.
     """
     return pulumi.get(self, "distributed_switch_port")
 
@@ -196,7 +196,7 @@ class _VnicState:
 """
 Input properties used for looking up and filtering Vnic resources.
 :param pulumi.Input[str] distributed_port_group: Key of the distributed portgroup the nic will connect to.
-:param pulumi.Input[str] distributed_switch_port: UUID of the
+:param pulumi.Input[str] distributed_switch_port: UUID of the vdswitch the nic will be attached to. Do not set if you set portgroup.
 :param pulumi.Input[str] host: ESX host the interface belongs to
 :param pulumi.Input['VnicIpv4Args'] ipv4: IPv4 settings. Either this or `ipv6` needs to be set. See IPv4 options below.
 :param pulumi.Input['VnicIpv6Args'] ipv6: IPv6 settings. Either this or `ipv6` needs to be set. See IPv6 options below.
@@ -243,7 +243,7 @@ class _VnicState:
 @pulumi.getter(name="distributedSwitchPort")
 def distributed_switch_port(self) -> Optional[pulumi.Input[str]]:
     """
-    UUID of the
+    UUID of the vdswitch the nic will be attached to. Do not set if you set portgroup.
     """
     return pulumi.get(self, "distributed_switch_port")
 
@@ -369,62 +369,61 @@ class Vnic(pulumi.CustomResource):
 
 ## Example Usage
 
-### S
-
 ### Create a vnic attached to a distributed virtual switch using the vmotion TCP/IP stack
 
-<!--Start PulumiCodeChooser -->
 ```python
 import pulumi
 import pulumi_vsphere as vsphere
 
-
-
-datacenter_id=
-
-
+datacenter = vsphere.get_datacenter(name="dc-01")
+host = vsphere.get_host(name="esxi-01.example.com",
+    datacenter_id=datacenter.id)
+vds = vsphere.DistributedVirtualSwitch("vds",
+    name="vds-01",
+    datacenter_id=datacenter.id,
     hosts=[vsphere.DistributedVirtualSwitchHostArgs(
-        host_system_id=
+        host_system_id=host.id,
         devices=["vnic3"],
     )])
-
+pg = vsphere.DistributedPortGroup("pg",
+    name="pg-01",
     vlan_id=1234,
-    distributed_virtual_switch_uuid=
-
-    host=
-    distributed_switch_port=
-    distributed_port_group=
+    distributed_virtual_switch_uuid=vds.id)
+vnic = vsphere.Vnic("vnic",
+    host=host.id,
+    distributed_switch_port=vds.id,
+    distributed_port_group=pg.id,
     ipv4=vsphere.VnicIpv4Args(
         dhcp=True,
     ),
     netstack="vmotion")
 ```
-<!--End PulumiCodeChooser -->
 
 ### Create a vnic attached to a portgroup using the default TCP/IP stack
 
-<!--Start PulumiCodeChooser -->
 ```python
 import pulumi
 import pulumi_vsphere as vsphere
 
-
-
-datacenter_id=
-
-
+datacenter = vsphere.get_datacenter(name="dc-01")
+host = vsphere.get_host(name="esxi-01.example.com",
+    datacenter_id=datacenter.id)
+hvs = vsphere.HostVirtualSwitch("hvs",
+    name="hvs-01",
+    host_system_id=host.id,
     network_adapters=[
         "vmnic3",
         "vmnic4",
     ],
    active_nics=["vmnic3"],
     standby_nics=["vmnic4"])
-
-
-
-
-
-
+pg = vsphere.HostPortGroup("pg",
+    name="pg-01",
+    virtual_switch_name=hvs.name,
+    host_system_id=host.id)
+vnic = vsphere.Vnic("vnic",
+    host=host.id,
+    portgroup=pg.name,
     ipv4=vsphere.VnicIpv4Args(
        dhcp=True,
     ),
@@ -433,7 +432,6 @@ class Vnic(pulumi.CustomResource):
     "management",
 ])
 ```
-<!--End PulumiCodeChooser -->
 
 ## Importing
 
@@ -442,18 +440,12 @@ class Vnic(pulumi.CustomResource):
 
 [docs-import]: /docs/import/index.html
 
-<!--Start PulumiCodeChooser -->
-```python
-import pulumi
-```
-<!--End PulumiCodeChooser -->
-
 The above would import the vnic `vmk2` from host with ID `host-123`.
 
 :param str resource_name: The name of the resource.
 :param pulumi.ResourceOptions opts: Options for the resource.
 :param pulumi.Input[str] distributed_port_group: Key of the distributed portgroup the nic will connect to.
-:param pulumi.Input[str] distributed_switch_port: UUID of the
+:param pulumi.Input[str] distributed_switch_port: UUID of the vdswitch the nic will be attached to. Do not set if you set portgroup.
 :param pulumi.Input[str] host: ESX host the interface belongs to
 :param pulumi.Input[pulumi.InputType['VnicIpv4Args']] ipv4: IPv4 settings. Either this or `ipv6` needs to be set. See IPv4 options below.
 :param pulumi.Input[pulumi.InputType['VnicIpv6Args']] ipv6: IPv6 settings. Either this or `ipv6` needs to be set. See IPv6 options below.
@@ -474,62 +466,61 @@ class Vnic(pulumi.CustomResource):
 
 ## Example Usage
 
-### S
-
 ### Create a vnic attached to a distributed virtual switch using the vmotion TCP/IP stack
 
-<!--Start PulumiCodeChooser -->
 ```python
 import pulumi
 import pulumi_vsphere as vsphere
 
-
-
-datacenter_id=
-
-
+datacenter = vsphere.get_datacenter(name="dc-01")
+host = vsphere.get_host(name="esxi-01.example.com",
+    datacenter_id=datacenter.id)
+vds = vsphere.DistributedVirtualSwitch("vds",
+    name="vds-01",
+    datacenter_id=datacenter.id,
     hosts=[vsphere.DistributedVirtualSwitchHostArgs(
-        host_system_id=
+        host_system_id=host.id,
        devices=["vnic3"],
     )])
-
+pg = vsphere.DistributedPortGroup("pg",
+    name="pg-01",
     vlan_id=1234,
-    distributed_virtual_switch_uuid=
-
-    host=
-    distributed_switch_port=
-    distributed_port_group=
+    distributed_virtual_switch_uuid=vds.id)
+vnic = vsphere.Vnic("vnic",
+    host=host.id,
+    distributed_switch_port=vds.id,
+    distributed_port_group=pg.id,
     ipv4=vsphere.VnicIpv4Args(
         dhcp=True,
     ),
     netstack="vmotion")
 ```
-<!--End PulumiCodeChooser -->
 
 ### Create a vnic attached to a portgroup using the default TCP/IP stack
 
-<!--Start PulumiCodeChooser -->
 ```python
 import pulumi
 import pulumi_vsphere as vsphere
 
-
-
-datacenter_id=
-
-
+datacenter = vsphere.get_datacenter(name="dc-01")
+host = vsphere.get_host(name="esxi-01.example.com",
+    datacenter_id=datacenter.id)
+hvs = vsphere.HostVirtualSwitch("hvs",
+    name="hvs-01",
+    host_system_id=host.id,
     network_adapters=[
         "vmnic3",
         "vmnic4",
     ],
     active_nics=["vmnic3"],
     standby_nics=["vmnic4"])
-
-
-
-
-
-
+pg = vsphere.HostPortGroup("pg",
+    name="pg-01",
+    virtual_switch_name=hvs.name,
+    host_system_id=host.id)
+vnic = vsphere.Vnic("vnic",
+    host=host.id,
+    portgroup=pg.name,
     ipv4=vsphere.VnicIpv4Args(
         dhcp=True,
     ),
@@ -538,7 +529,6 @@ class Vnic(pulumi.CustomResource):
     "management",
 ])
 ```
-<!--End PulumiCodeChooser -->
 
 ## Importing
 
@@ -547,12 +537,6 @@ class Vnic(pulumi.CustomResource):
 
 [docs-import]: /docs/import/index.html
 
-<!--Start PulumiCodeChooser -->
-```python
-import pulumi
-```
-<!--End PulumiCodeChooser -->
-
 The above would import the vnic `vmk2` from host with ID `host-123`.
 
 :param str resource_name: The name of the resource.
@@ -629,7 +613,7 @@ class Vnic(pulumi.CustomResource):
 :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
 :param pulumi.ResourceOptions opts: Options for the resource.
 :param pulumi.Input[str] distributed_port_group: Key of the distributed portgroup the nic will connect to.
-:param pulumi.Input[str] distributed_switch_port: UUID of the
+:param pulumi.Input[str] distributed_switch_port: UUID of the vdswitch the nic will be attached to. Do not set if you set portgroup.
 :param pulumi.Input[str] host: ESX host the interface belongs to
 :param pulumi.Input[pulumi.InputType['VnicIpv4Args']] ipv4: IPv4 settings. Either this or `ipv6` needs to be set. See IPv4 options below.
 :param pulumi.Input[pulumi.InputType['VnicIpv6Args']] ipv6: IPv6 settings. Either this or `ipv6` needs to be set. See IPv6 options below.
@@ -667,7 +651,7 @@ class Vnic(pulumi.CustomResource):
 @pulumi.getter(name="distributedSwitchPort")
 def distributed_switch_port(self) -> pulumi.Output[Optional[str]]:
     """
-    UUID of the
+    UUID of the vdswitch the nic will be attached to. Do not set if you set portgroup.
     """
     return pulumi.get(self, "distributed_switch_port")
 
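One more note on this file: the release drops the placeholder code block from both copies of the `## Importing` section, leaving only the sentence about importing `vmk2` from `host-123`. For readers left without an example, here is a minimal sketch of adopting such an interface from program code via `ResourceOptions(import_=...)`; the `host-123_vmk2` ID format (`<host id>_<vnic id>`) mirrors the upstream provider's documented convention and should be treated as an assumption here:

```python
import pulumi
import pulumi_vsphere as vsphere

# Adopt the existing interface vmk2 on host-123 into Pulumi state
# instead of creating a new one. The "<host id>_<vnic id>" import ID
# is assumed from the upstream provider's convention.
vnic = vsphere.Vnic(
    "vnic",
    host="host-123",
    opts=pulumi.ResourceOptions(import_="host-123_vmk2"),
)
```

After the first successful `pulumi up`, the `import_` option can be removed and the resource managed normally.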