pulumi-vsphere 4.9.0a1698129503-py3-none-any.whl → 4.9.0a1698198425-py3-none-any.whl
This diff shows the changes between two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
This release has been flagged as potentially problematic.
- pulumi_vsphere/_inputs.py +311 -93
- pulumi_vsphere/_utilities.py +19 -0
- pulumi_vsphere/compute_cluster.py +259 -3
- pulumi_vsphere/compute_cluster_host_group.py +17 -3
- pulumi_vsphere/compute_cluster_vm_affinity_rule.py +20 -130
- pulumi_vsphere/compute_cluster_vm_anti_affinity_rule.py +20 -4
- pulumi_vsphere/compute_cluster_vm_dependency_rule.py +27 -5
- pulumi_vsphere/compute_cluster_vm_group.py +17 -3
- pulumi_vsphere/compute_cluster_vm_host_rule.py +28 -4
- pulumi_vsphere/content_library.py +15 -13
- pulumi_vsphere/content_library_item.py +21 -3
- pulumi_vsphere/custom_attribute.py +10 -2
- pulumi_vsphere/datacenter.py +10 -34
- pulumi_vsphere/datastore_cluster.py +97 -3
- pulumi_vsphere/datastore_cluster_vm_anti_affinity_rule.py +20 -4
- pulumi_vsphere/distributed_port_group.py +175 -3
- pulumi_vsphere/distributed_virtual_switch.py +355 -115
- pulumi_vsphere/dpm_host_override.py +28 -4
- pulumi_vsphere/drs_vm_override.py +28 -4
- pulumi_vsphere/entity_permissions.py +23 -5
- pulumi_vsphere/file.py +35 -61
- pulumi_vsphere/folder.py +20 -4
- pulumi_vsphere/get_compute_cluster.py +0 -22
- pulumi_vsphere/get_compute_cluster_host_group.py +0 -34
- pulumi_vsphere/get_content_library.py +0 -18
- pulumi_vsphere/get_custom_attribute.py +0 -18
- pulumi_vsphere/get_datacenter.py +0 -18
- pulumi_vsphere/get_datastore.py +0 -22
- pulumi_vsphere/get_datastore_cluster.py +0 -22
- pulumi_vsphere/get_distributed_virtual_switch.py +0 -40
- pulumi_vsphere/get_dynamic.py +0 -38
- pulumi_vsphere/get_folder.py +0 -18
- pulumi_vsphere/get_host.py +0 -22
- pulumi_vsphere/get_host_pci_device.py +2 -26
- pulumi_vsphere/get_host_thumbprint.py +0 -18
- pulumi_vsphere/get_license.py +0 -18
- pulumi_vsphere/get_network.py +0 -22
- pulumi_vsphere/get_policy.py +0 -20
- pulumi_vsphere/get_resource_pool.py +0 -64
- pulumi_vsphere/get_role.py +0 -18
- pulumi_vsphere/get_tag.py +0 -22
- pulumi_vsphere/get_tag_category.py +0 -18
- pulumi_vsphere/get_vapp_container.py +0 -22
- pulumi_vsphere/get_virtual_machine.py +0 -54
- pulumi_vsphere/get_vmfs_disks.py +0 -28
- pulumi_vsphere/ha_vm_override.py +72 -4
- pulumi_vsphere/host.py +23 -101
- pulumi_vsphere/host_port_group.py +74 -132
- pulumi_vsphere/host_virtual_switch.py +87 -5
- pulumi_vsphere/license.py +15 -31
- pulumi_vsphere/nas_datastore.py +53 -5
- pulumi_vsphere/outputs.py +364 -104
- pulumi_vsphere/provider.py +31 -3
- pulumi_vsphere/resource_pool.py +61 -83
- pulumi_vsphere/role.py +10 -2
- pulumi_vsphere/storage_drs_vm_override.py +32 -4
- pulumi_vsphere/tag.py +13 -3
- pulumi_vsphere/tag_category.py +16 -4
- pulumi_vsphere/vapp_container.py +61 -3
- pulumi_vsphere/vapp_entity.py +48 -4
- pulumi_vsphere/virtual_disk.py +27 -5
- pulumi_vsphere/virtual_machine.py +284 -18
- pulumi_vsphere/virtual_machine_snapshot.py +33 -39
- pulumi_vsphere/vm_storage_policy.py +13 -205
- pulumi_vsphere/vmfs_datastore.py +32 -4
- pulumi_vsphere/vnic.py +19 -153
- {pulumi_vsphere-4.9.0a1698129503.dist-info → pulumi_vsphere-4.9.0a1698198425.dist-info}/METADATA +1 -1
- pulumi_vsphere-4.9.0a1698198425.dist-info/RECORD +77 -0
- pulumi_vsphere-4.9.0a1698129503.dist-info/RECORD +0 -77
- {pulumi_vsphere-4.9.0a1698129503.dist-info → pulumi_vsphere-4.9.0a1698198425.dist-info}/WHEEL +0 -0
- {pulumi_vsphere-4.9.0a1698129503.dist-info → pulumi_vsphere-4.9.0a1698198425.dist-info}/top_level.txt +0 -0
pulumi_vsphere/vm_storage_policy.py
CHANGED

@@ -34,10 +34,16 @@ class VmStoragePolicyArgs:
     @staticmethod
     def _configure(
              _setter: Callable[[Any, Any], None],
-             tag_rules: pulumi.Input[Sequence[pulumi.Input['VmStoragePolicyTagRuleArgs']]],
+             tag_rules: Optional[pulumi.Input[Sequence[pulumi.Input['VmStoragePolicyTagRuleArgs']]]] = None,
              description: Optional[pulumi.Input[str]] = None,
              name: Optional[pulumi.Input[str]] = None,
-             opts: Optional[pulumi.ResourceOptions]=None
+             opts: Optional[pulumi.ResourceOptions] = None,
+             **kwargs):
+        if tag_rules is None and 'tagRules' in kwargs:
+            tag_rules = kwargs['tagRules']
+        if tag_rules is None:
+            raise TypeError("Missing 'tag_rules' argument")
+
         _setter("tag_rules", tag_rules)
         if description is not None:
             _setter("description", description)
@@ -105,7 +111,11 @@ class _VmStoragePolicyState:
              description: Optional[pulumi.Input[str]] = None,
              name: Optional[pulumi.Input[str]] = None,
              tag_rules: Optional[pulumi.Input[Sequence[pulumi.Input['VmStoragePolicyTagRuleArgs']]]] = None,
-             opts: Optional[pulumi.ResourceOptions]=None
+             opts: Optional[pulumi.ResourceOptions] = None,
+             **kwargs):
+        if tag_rules is None and 'tagRules' in kwargs:
+            tag_rules = kwargs['tagRules']
+
         if description is not None:
             _setter("description", description)
         if name is not None:
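The vm_storage_policy.py hunks above illustrate the change applied throughout the SDK in this release: `_configure` parameters that used to be required become `Optional[...] = None`, the method accepts a trailing `**kwargs`, camelCase keys such as `tagRules` are mapped onto their snake_case parameters, and still-required values are enforced at runtime with an explicit `TypeError`. The sketch below is a minimal standalone illustration of that pattern; `ExampleArgs` and its fields are hypothetical and not part of pulumi-vsphere.

```python
# Minimal illustration of the new _configure pattern in this release.
# ExampleArgs and its fields are hypothetical, not pulumi-vsphere code.
from typing import Any, Callable, Optional, Sequence


class ExampleArgs:
    @staticmethod
    def _configure(
            _setter: Callable[[Any, Any], None],
            tag_rules: Optional[Sequence[str]] = None,
            description: Optional[str] = None,
            **kwargs):
        # Accept the camelCase spelling as a fallback.
        if tag_rules is None and 'tagRules' in kwargs:
            tag_rules = kwargs['tagRules']
        # The value is still logically required; it is now checked at runtime.
        if tag_rules is None:
            raise TypeError("Missing 'tag_rules' argument")
        _setter("tag_rules", tag_rules)
        if description is not None:
            _setter("description", description)


# Both spellings populate the same property; omitting it raises TypeError.
collected = {}
ExampleArgs._configure(lambda k, v: collected.update({k: v}), tagRules=["env"])
assert collected == {"tag_rules": ["env"]}
```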
@@ -164,107 +174,6 @@ class VmStoragePolicy(pulumi.CustomResource):
         policies. Using this resource, tag based placement rules can be created to
         place virtual machines on a datastore with matching tags. If storage requirements for the applications on the virtual machine change, you can modify the storage policy that was originally applied to the virtual machine.

-        ## Example Usage
-
-        The following example creates storage policies with `tag_rules` base on sets of environment, service level, and replication attributes.
-
-        In this example, tags are first applied to datastores.
-
-        ```python
-        import pulumi
-        import pulumi_vsphere as vsphere
-
-        environment = vsphere.get_tag_category(name="environment")
-        service_level = vsphere.get_tag_category(name="service_level")
-        replication = vsphere.get_tag_category(name="replication")
-        production = vsphere.get_tag(category_id="data.vsphere_tag_category.environment.id",
-            name="production")
-        development = vsphere.get_tag(category_id="data.vsphere_tag_category.environment.id",
-            name="development")
-        platinum = vsphere.get_tag(category_id="data.vsphere_tag_category.service_level.id",
-            name="platinum")
-        gold = vsphere.get_tag(category_id="data.vsphere_tag_category.service_level.id",
-            name="platinum")
-        silver = vsphere.get_tag(category_id="data.vsphere_tag_category.service_level.id",
-            name="silver")
-        bronze = vsphere.get_tag(category_id="data.vsphere_tag_category.service_level.id",
-            name="bronze")
-        replicated = vsphere.get_tag(category_id="data.vsphere_tag_category.replication.id",
-            name="replicated")
-        non_replicated = vsphere.get_tag(category_id="data.vsphere_tag_category.replication.id",
-            name="non_replicated")
-        prod_datastore = vsphere.VmfsDatastore("prodDatastore", tags=[
-            "data.vsphere_tag.production.id",
-            "data.vsphere_tag.platinum.id",
-            "data.vsphere_tag.replicated.id",
-        ])
-        dev_datastore = vsphere.NasDatastore("devDatastore", tags=[
-            "data.vsphere_tag.development.id",
-            "data.vsphere_tag.silver.id",
-            "data.vsphere_tag.non_replicated.id",
-        ])
-        ```
-
-        Next, storage policies are created and `tag_rules` are applied.
-
-        ```python
-        import pulumi
-        import pulumi_vsphere as vsphere
-
-        prod_platinum_replicated = vsphere.VmStoragePolicy("prodPlatinumReplicated",
-            description="prod_platinum_replicated",
-            tag_rules=[
-                vsphere.VmStoragePolicyTagRuleArgs(
-                    tag_category=data["vsphere_tag_category"]["environment"]["name"],
-                    tags=[data["vsphere_tag"]["production"]["name"]],
-                    include_datastores_with_tags=True,
-                ),
-                vsphere.VmStoragePolicyTagRuleArgs(
-                    tag_category=data["vsphere_tag_category"]["service_level"]["name"],
-                    tags=[data["vsphere_tag"]["platinum"]["name"]],
-                    include_datastores_with_tags=True,
-                ),
-                vsphere.VmStoragePolicyTagRuleArgs(
-                    tag_category=data["vsphere_tag_category"]["replication"]["name"],
-                    tags=[data["vsphere_tag"]["replicated"]["name"]],
-                    include_datastores_with_tags=True,
-                ),
-            ])
-        dev_silver_nonreplicated = vsphere.VmStoragePolicy("devSilverNonreplicated",
-            description="dev_silver_nonreplicated",
-            tag_rules=[
-                vsphere.VmStoragePolicyTagRuleArgs(
-                    tag_category=data["vsphere_tag_category"]["environment"]["name"],
-                    tags=[data["vsphere_tag"]["development"]["name"]],
-                    include_datastores_with_tags=True,
-                ),
-                vsphere.VmStoragePolicyTagRuleArgs(
-                    tag_category=data["vsphere_tag_category"]["service_level"]["name"],
-                    tags=[data["vsphere_tag"]["silver"]["name"]],
-                    include_datastores_with_tags=True,
-                ),
-                vsphere.VmStoragePolicyTagRuleArgs(
-                    tag_category=data["vsphere_tag_category"]["replication"]["name"],
-                    tags=[data["vsphere_tag"]["non_replicated"]["name"]],
-                    include_datastores_with_tags=True,
-                ),
-            ])
-        ```
-
-        Lasttly, when creating a virtual machine resource, a storage policy can be specificed to direct virtual machine placement to a datastore which matches the policy's `tags_rules`.
-
-        ```python
-        import pulumi
-        import pulumi_vsphere as vsphere
-
-        prod_platinum_replicated = vsphere.get_policy(name="prod_platinum_replicated")
-        dev_silver_nonreplicated = vsphere.get_policy(name="dev_silver_nonreplicated")
-        prod_vm = vsphere.VirtualMachine("prodVm", storage_policy_id=data["vsphere_storage_policy"]["storage_policy"]["prod_platinum_replicated"]["id"])
-        # ... other configuration ...
-        dev_vm = vsphere.VirtualMachine("devVm", storage_policy_id=data["vsphere_storage_policy"]["storage_policy"]["dev_silver_nonreplicated"]["id"])
-        # ... other configuration ...
-        ```
-
         :param str resource_name: The name of the resource.
         :param pulumi.ResourceOptions opts: Options for the resource.
         :param pulumi.Input[str] description: Description of the storage policy.
@@ -282,107 +191,6 @@ class VmStoragePolicy(pulumi.CustomResource):
         policies. Using this resource, tag based placement rules can be created to
         place virtual machines on a datastore with matching tags. If storage requirements for the applications on the virtual machine change, you can modify the storage policy that was originally applied to the virtual machine.

-        ## Example Usage
-
-        The following example creates storage policies with `tag_rules` base on sets of environment, service level, and replication attributes.
-
-        In this example, tags are first applied to datastores.
-
-        ```python
-        import pulumi
-        import pulumi_vsphere as vsphere
-
-        environment = vsphere.get_tag_category(name="environment")
-        service_level = vsphere.get_tag_category(name="service_level")
-        replication = vsphere.get_tag_category(name="replication")
-        production = vsphere.get_tag(category_id="data.vsphere_tag_category.environment.id",
-            name="production")
-        development = vsphere.get_tag(category_id="data.vsphere_tag_category.environment.id",
-            name="development")
-        platinum = vsphere.get_tag(category_id="data.vsphere_tag_category.service_level.id",
-            name="platinum")
-        gold = vsphere.get_tag(category_id="data.vsphere_tag_category.service_level.id",
-            name="platinum")
-        silver = vsphere.get_tag(category_id="data.vsphere_tag_category.service_level.id",
-            name="silver")
-        bronze = vsphere.get_tag(category_id="data.vsphere_tag_category.service_level.id",
-            name="bronze")
-        replicated = vsphere.get_tag(category_id="data.vsphere_tag_category.replication.id",
-            name="replicated")
-        non_replicated = vsphere.get_tag(category_id="data.vsphere_tag_category.replication.id",
-            name="non_replicated")
-        prod_datastore = vsphere.VmfsDatastore("prodDatastore", tags=[
-            "data.vsphere_tag.production.id",
-            "data.vsphere_tag.platinum.id",
-            "data.vsphere_tag.replicated.id",
-        ])
-        dev_datastore = vsphere.NasDatastore("devDatastore", tags=[
-            "data.vsphere_tag.development.id",
-            "data.vsphere_tag.silver.id",
-            "data.vsphere_tag.non_replicated.id",
-        ])
-        ```
-
-        Next, storage policies are created and `tag_rules` are applied.
-
-        ```python
-        import pulumi
-        import pulumi_vsphere as vsphere
-
-        prod_platinum_replicated = vsphere.VmStoragePolicy("prodPlatinumReplicated",
-            description="prod_platinum_replicated",
-            tag_rules=[
-                vsphere.VmStoragePolicyTagRuleArgs(
-                    tag_category=data["vsphere_tag_category"]["environment"]["name"],
-                    tags=[data["vsphere_tag"]["production"]["name"]],
-                    include_datastores_with_tags=True,
-                ),
-                vsphere.VmStoragePolicyTagRuleArgs(
-                    tag_category=data["vsphere_tag_category"]["service_level"]["name"],
-                    tags=[data["vsphere_tag"]["platinum"]["name"]],
-                    include_datastores_with_tags=True,
-                ),
-                vsphere.VmStoragePolicyTagRuleArgs(
-                    tag_category=data["vsphere_tag_category"]["replication"]["name"],
-                    tags=[data["vsphere_tag"]["replicated"]["name"]],
-                    include_datastores_with_tags=True,
-                ),
-            ])
-        dev_silver_nonreplicated = vsphere.VmStoragePolicy("devSilverNonreplicated",
-            description="dev_silver_nonreplicated",
-            tag_rules=[
-                vsphere.VmStoragePolicyTagRuleArgs(
-                    tag_category=data["vsphere_tag_category"]["environment"]["name"],
-                    tags=[data["vsphere_tag"]["development"]["name"]],
-                    include_datastores_with_tags=True,
-                ),
-                vsphere.VmStoragePolicyTagRuleArgs(
-                    tag_category=data["vsphere_tag_category"]["service_level"]["name"],
-                    tags=[data["vsphere_tag"]["silver"]["name"]],
-                    include_datastores_with_tags=True,
-                ),
-                vsphere.VmStoragePolicyTagRuleArgs(
-                    tag_category=data["vsphere_tag_category"]["replication"]["name"],
-                    tags=[data["vsphere_tag"]["non_replicated"]["name"]],
-                    include_datastores_with_tags=True,
-                ),
-            ])
-        ```
-
-        Lasttly, when creating a virtual machine resource, a storage policy can be specificed to direct virtual machine placement to a datastore which matches the policy's `tags_rules`.
-
-        ```python
-        import pulumi
-        import pulumi_vsphere as vsphere
-
-        prod_platinum_replicated = vsphere.get_policy(name="prod_platinum_replicated")
-        dev_silver_nonreplicated = vsphere.get_policy(name="dev_silver_nonreplicated")
-        prod_vm = vsphere.VirtualMachine("prodVm", storage_policy_id=data["vsphere_storage_policy"]["storage_policy"]["prod_platinum_replicated"]["id"])
-        # ... other configuration ...
-        dev_vm = vsphere.VirtualMachine("devVm", storage_policy_id=data["vsphere_storage_policy"]["storage_policy"]["dev_silver_nonreplicated"]["id"])
-        # ... other configuration ...
-        ```
-
         :param str resource_name: The name of the resource.
         :param VmStoragePolicyArgs args: The arguments to use to populate this resource's properties.
         :param pulumi.ResourceOptions opts: Options for the resource.
pulumi_vsphere/vmfs_datastore.py
CHANGED

@@ -64,14 +64,26 @@ class VmfsDatastoreArgs:
     @staticmethod
     def _configure(
              _setter: Callable[[Any, Any], None],
-             disks: pulumi.Input[Sequence[pulumi.Input[str]]],
-             host_system_id: pulumi.Input[str],
+             disks: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
+             host_system_id: Optional[pulumi.Input[str]] = None,
              custom_attributes: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
              datastore_cluster_id: Optional[pulumi.Input[str]] = None,
              folder: Optional[pulumi.Input[str]] = None,
              name: Optional[pulumi.Input[str]] = None,
              tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
-             opts: Optional[pulumi.ResourceOptions]=None
+             opts: Optional[pulumi.ResourceOptions] = None,
+             **kwargs):
+        if disks is None:
+            raise TypeError("Missing 'disks' argument")
+        if host_system_id is None and 'hostSystemId' in kwargs:
+            host_system_id = kwargs['hostSystemId']
+        if host_system_id is None:
+            raise TypeError("Missing 'host_system_id' argument")
+        if custom_attributes is None and 'customAttributes' in kwargs:
+            custom_attributes = kwargs['customAttributes']
+        if datastore_cluster_id is None and 'datastoreClusterId' in kwargs:
+            datastore_cluster_id = kwargs['datastoreClusterId']
+
         _setter("disks", disks)
         _setter("host_system_id", host_system_id)
         if custom_attributes is not None:
@@ -281,7 +293,23 @@ class _VmfsDatastoreState:
              tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
              uncommitted_space: Optional[pulumi.Input[int]] = None,
              url: Optional[pulumi.Input[str]] = None,
-             opts: Optional[pulumi.ResourceOptions]=None
+             opts: Optional[pulumi.ResourceOptions] = None,
+             **kwargs):
+        if custom_attributes is None and 'customAttributes' in kwargs:
+            custom_attributes = kwargs['customAttributes']
+        if datastore_cluster_id is None and 'datastoreClusterId' in kwargs:
+            datastore_cluster_id = kwargs['datastoreClusterId']
+        if free_space is None and 'freeSpace' in kwargs:
+            free_space = kwargs['freeSpace']
+        if host_system_id is None and 'hostSystemId' in kwargs:
+            host_system_id = kwargs['hostSystemId']
+        if maintenance_mode is None and 'maintenanceMode' in kwargs:
+            maintenance_mode = kwargs['maintenanceMode']
+        if multiple_host_access is None and 'multipleHostAccess' in kwargs:
+            multiple_host_access = kwargs['multipleHostAccess']
+        if uncommitted_space is None and 'uncommittedSpace' in kwargs:
+            uncommitted_space = kwargs['uncommittedSpace']
+
         if accessible is not None:
             _setter("accessible", accessible)
         if capacity is not None:
pulumi_vsphere/vnic.py
CHANGED

@@ -55,7 +55,7 @@ class VnicArgs:
     @staticmethod
     def _configure(
              _setter: Callable[[Any, Any], None],
-             host: pulumi.Input[str],
+             host: Optional[pulumi.Input[str]] = None,
              distributed_port_group: Optional[pulumi.Input[str]] = None,
              distributed_switch_port: Optional[pulumi.Input[str]] = None,
              ipv4: Optional[pulumi.Input['VnicIpv4Args']] = None,
@@ -65,7 +65,15 @@ class VnicArgs:
              netstack: Optional[pulumi.Input[str]] = None,
              portgroup: Optional[pulumi.Input[str]] = None,
              services: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
-             opts: Optional[pulumi.ResourceOptions]=None
+             opts: Optional[pulumi.ResourceOptions] = None,
+             **kwargs):
+        if host is None:
+            raise TypeError("Missing 'host' argument")
+        if distributed_port_group is None and 'distributedPortGroup' in kwargs:
+            distributed_port_group = kwargs['distributedPortGroup']
+        if distributed_switch_port is None and 'distributedSwitchPort' in kwargs:
+            distributed_switch_port = kwargs['distributedSwitchPort']
+
         _setter("host", host)
         if distributed_port_group is not None:
             _setter("distributed_port_group", distributed_port_group)
@@ -259,7 +267,13 @@ class _VnicState:
              netstack: Optional[pulumi.Input[str]] = None,
              portgroup: Optional[pulumi.Input[str]] = None,
              services: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
-             opts: Optional[pulumi.ResourceOptions]=None
+             opts: Optional[pulumi.ResourceOptions] = None,
+             **kwargs):
+        if distributed_port_group is None and 'distributedPortGroup' in kwargs:
+            distributed_port_group = kwargs['distributedPortGroup']
+        if distributed_switch_port is None and 'distributedSwitchPort' in kwargs:
+            distributed_switch_port = kwargs['distributedSwitchPort']
+
         if distributed_port_group is not None:
             _setter("distributed_port_group", distributed_port_group)
         if distributed_switch_port is not None:
@@ -424,76 +438,6 @@ class Vnic(pulumi.CustomResource):
         ## Example Usage

         ### S
-        ### Create a vnic attached to a distributed virtual switch using the vmotion TCP/IP stack
-
-        ```python
-        import pulumi
-        import pulumi_vsphere as vsphere
-
-        dc = vsphere.get_datacenter(name="mydc")
-        h1 = vsphere.get_host(name="esxi1.host.test",
-            datacenter_id=dc.id)
-        d1 = vsphere.DistributedVirtualSwitch("d1",
-            datacenter_id=dc.id,
-            hosts=[vsphere.DistributedVirtualSwitchHostArgs(
-                host_system_id=h1.id,
-                devices=["vnic3"],
-            )])
-        p1 = vsphere.DistributedPortGroup("p1",
-            vlan_id=1234,
-            distributed_virtual_switch_uuid=d1.id)
-        v1 = vsphere.Vnic("v1",
-            host=h1.id,
-            distributed_switch_port=d1.id,
-            distributed_port_group=p1.id,
-            ipv4=vsphere.VnicIpv4Args(
-                dhcp=True,
-            ),
-            netstack="vmotion")
-        ```
-        ### Create a vnic attached to a portgroup using the default TCP/IP stack
-
-        ```python
-        import pulumi
-        import pulumi_vsphere as vsphere
-
-        dc = vsphere.get_datacenter(name="mydc")
-        h1 = vsphere.get_host(name="esxi1.host.test",
-            datacenter_id=dc.id)
-        hvs1 = vsphere.HostVirtualSwitch("hvs1",
-            host_system_id=h1.id,
-            network_adapters=[
-                "vmnic3",
-                "vmnic4",
-            ],
-            active_nics=["vmnic3"],
-            standby_nics=["vmnic4"])
-        p1 = vsphere.HostPortGroup("p1",
-            virtual_switch_name=hvs1.name,
-            host_system_id=h1.id)
-        v1 = vsphere.Vnic("v1",
-            host=h1.id,
-            portgroup=p1.name,
-            ipv4=vsphere.VnicIpv4Args(
-                dhcp=True,
-            ),
-            services=[
-                "vsan",
-                "management",
-            ])
-        ```
-        ## Importing
-
-        An existing vNic can be [imported][docs-import] into this resource
-        via supplying the vNic's ID. An example is below:
-
-        [docs-import]: /docs/import/index.html
-
-        ```python
-        import pulumi
-        ```
-
-        The above would import the vnic `vmk2` from host with ID `host-123`.

         :param str resource_name: The name of the resource.
         :param pulumi.ResourceOptions opts: Options for the resource.
@@ -520,76 +464,6 @@ class Vnic(pulumi.CustomResource):
         ## Example Usage

         ### S
-        ### Create a vnic attached to a distributed virtual switch using the vmotion TCP/IP stack
-
-        ```python
-        import pulumi
-        import pulumi_vsphere as vsphere
-
-        dc = vsphere.get_datacenter(name="mydc")
-        h1 = vsphere.get_host(name="esxi1.host.test",
-            datacenter_id=dc.id)
-        d1 = vsphere.DistributedVirtualSwitch("d1",
-            datacenter_id=dc.id,
-            hosts=[vsphere.DistributedVirtualSwitchHostArgs(
-                host_system_id=h1.id,
-                devices=["vnic3"],
-            )])
-        p1 = vsphere.DistributedPortGroup("p1",
-            vlan_id=1234,
-            distributed_virtual_switch_uuid=d1.id)
-        v1 = vsphere.Vnic("v1",
-            host=h1.id,
-            distributed_switch_port=d1.id,
-            distributed_port_group=p1.id,
-            ipv4=vsphere.VnicIpv4Args(
-                dhcp=True,
-            ),
-            netstack="vmotion")
-        ```
-        ### Create a vnic attached to a portgroup using the default TCP/IP stack
-
-        ```python
-        import pulumi
-        import pulumi_vsphere as vsphere
-
-        dc = vsphere.get_datacenter(name="mydc")
-        h1 = vsphere.get_host(name="esxi1.host.test",
-            datacenter_id=dc.id)
-        hvs1 = vsphere.HostVirtualSwitch("hvs1",
-            host_system_id=h1.id,
-            network_adapters=[
-                "vmnic3",
-                "vmnic4",
-            ],
-            active_nics=["vmnic3"],
-            standby_nics=["vmnic4"])
-        p1 = vsphere.HostPortGroup("p1",
-            virtual_switch_name=hvs1.name,
-            host_system_id=h1.id)
-        v1 = vsphere.Vnic("v1",
-            host=h1.id,
-            portgroup=p1.name,
-            ipv4=vsphere.VnicIpv4Args(
-                dhcp=True,
-            ),
-            services=[
-                "vsan",
-                "management",
-            ])
-        ```
-        ## Importing
-
-        An existing vNic can be [imported][docs-import] into this resource
-        via supplying the vNic's ID. An example is below:
-
-        [docs-import]: /docs/import/index.html
-
-        ```python
-        import pulumi
-        ```
-
-        The above would import the vnic `vmk2` from host with ID `host-123`.

         :param str resource_name: The name of the resource.
         :param VnicArgs args: The arguments to use to populate this resource's properties.
@@ -634,17 +508,9 @@ class Vnic(pulumi.CustomResource):
             if host is None and not opts.urn:
                 raise TypeError("Missing required property 'host'")
             __props__.__dict__["host"] = host
-
-            ipv4 = ipv4 or {}
-            def _setter(key, value):
-                ipv4[key] = value
-            VnicIpv4Args._configure(_setter, **ipv4)
+            ipv4 = _utilities.configure(ipv4, VnicIpv4Args, True)
             __props__.__dict__["ipv4"] = ipv4
-
-            ipv6 = ipv6 or {}
-            def _setter(key, value):
-                ipv6[key] = value
-            VnicIpv6Args._configure(_setter, **ipv6)
+            ipv6 = _utilities.configure(ipv6, VnicIpv6Args, True)
             __props__.__dict__["ipv6"] = ipv6
             __props__.__dict__["mac"] = mac
             __props__.__dict__["mtu"] = mtu
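The hunk above replaces the inline `_setter` closures with a shared `_utilities.configure` helper; the `_utilities.py` additions themselves (+19 lines in the file list) are not expanded in this diff. The following is only a sketch of a helper that would be compatible with the call sites shown, reconstructed from the inline code it replaces; the real implementation, and the meaning of the boolean argument, may differ.

```python
# Hypothetical reconstruction of a configure() helper matching call sites such
# as _utilities.configure(ipv4, VnicIpv4Args, True); the actual helper in
# pulumi_vsphere/_utilities.py may differ.
from typing import Any, Callable, Optional, Type


def configure(val: Optional[dict], cls: Type[Any], required: bool) -> Optional[dict]:
    # Treating the third argument as a "required" flag is an assumption.
    if val is None and not required:
        return None
    # Mirror the removed inline pattern: default to an empty dict, then let the
    # generated args class normalize the values through its _configure hook.
    val = val or {}

    def _setter(key: str, value: Any) -> None:
        val[key] = value

    cls._configure(_setter, **val)
    return val
```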
pulumi_vsphere-4.9.0a1698198425.dist-info/RECORD
ADDED

@@ -0,0 +1,77 @@
+pulumi_vsphere/__init__.py,sha256=Uir3vHk_UEO8Cyuv72-4g5yWUHSa6uGvPdZnZKgLMyE,9765
+pulumi_vsphere/_inputs.py,sha256=52B3uGLoWsC0n1qqyZqvmkCK0C-mRdYk-X5ebemiKGE,114215
+pulumi_vsphere/_utilities.py,sha256=gEJgwmfYYqdEGeUJsGQtt67DByZiBW4wytye4oezFWY,8705
+pulumi_vsphere/compute_cluster.py,sha256=FvszJJPcRtwpUEn761wdb9Gklezk54rJgwLnz4m-OW0,275534
+pulumi_vsphere/compute_cluster_host_group.py,sha256=Y6-QAdTeTOFKz9MiRYyUMS6JpB8mBRW7G-Y0dMfqVyw,15418
+pulumi_vsphere/compute_cluster_vm_affinity_rule.py,sha256=EUuldwM2nSlJBnTzQpt9T4351y3CxUHBvsSxIHwK7fs,21797
+pulumi_vsphere/compute_cluster_vm_anti_affinity_rule.py,sha256=NLr4KcKavWi1qfb8MqFQS0mjd4ZAQ3zzep9KaBhPeeE,20040
+pulumi_vsphere/compute_cluster_vm_dependency_rule.py,sha256=WjwQ_sK1cNF-WUHyuqgVOD8qPMLc7fAYrR06SPAgtgk,24627
+pulumi_vsphere/compute_cluster_vm_group.py,sha256=wnUs1EJnq6bOWZqqBBjAmTqV1WAq6JvYFPxrie2m37M,15490
+pulumi_vsphere/compute_cluster_vm_host_rule.py,sha256=YMstKb36ezW_ea8S7C0wopAA9AI41RLQ8AtBaIfEL_4,27922
+pulumi_vsphere/content_library.py,sha256=PYSAkUBrQ20FdPkDfL4XWSG1lojtG4qwq1eRphWCb7Q,17410
+pulumi_vsphere/content_library_item.py,sha256=9re2RKDc-QAQ7kbQV7DuQfR_rLhPRonz9_szzjKDYPc,18026
+pulumi_vsphere/custom_attribute.py,sha256=5poHb3uXn8msKfczjdmoW6qEYa0Aa3GZ-e4KsULvM1c,10410
+pulumi_vsphere/datacenter.py,sha256=LC-Y_VxONBrLaUkofL-2KNtEVhFXzbjjeNnRwpydxtY,19769
+pulumi_vsphere/datastore_cluster.py,sha256=BUEUDciBmhRwXLDUdNvfDSk0yb_l3E6v6x70325dCEM,105294
+pulumi_vsphere/datastore_cluster_vm_anti_affinity_rule.py,sha256=4GHubDkT1nRBv6rO4M-rT-YzlCuPNw78pVPyqq46vzE,19366
+pulumi_vsphere/distributed_port_group.py,sha256=ppQB9wiyIHIYjkG-ObZWvzvqXth8nxyqy6I-7q3y10E,158186
+pulumi_vsphere/distributed_virtual_switch.py,sha256=FmLBlEdlKdqDtrITPL_RTns4fPUvMJsKxXTEIQUfCcc,306628
+pulumi_vsphere/dpm_host_override.py,sha256=CgSrtumEsq8miJWHzL675uavH0cfi8jDAQGjSjavJwA,18128
+pulumi_vsphere/drs_vm_override.py,sha256=ukR6ip22RxSNfFkWrUry-rhGeFULkmHIAEh8c2jJHCc,19472
+pulumi_vsphere/entity_permissions.py,sha256=3keE0P00hRgqaysaAYe59rl_X9juLWZlq36PN4y3rkM,14490
+pulumi_vsphere/file.py,sha256=glTn97807ZS9bsG5DXZGsAKryeecHVb2C1CCq8jbJ_c,25167
+pulumi_vsphere/folder.py,sha256=ZqzulXw7GyLj4iUR03AkDAgHF1YiZ8tDgxwitddhv9c,27894
+pulumi_vsphere/get_compute_cluster.py,sha256=-Knl-GR3S2sGzAn5I-owrBd8e7AQ5KFpIbeEjCz-7rs,5485
+pulumi_vsphere/get_compute_cluster_host_group.py,sha256=2eOzQSvhjT9c87bVkxpY1upKnnDlrzxA78APRs-YNmI,4966
+pulumi_vsphere/get_content_library.py,sha256=3pPgObV0feDdwADFCY3-EdLgTc4TLG33lhmXiYHZQTk,2894
+pulumi_vsphere/get_content_library_item.py,sha256=7AVusHdHlbF7DxAtbgWyVUKIUnc-hlANldNX5vnYZa4,4506
+pulumi_vsphere/get_custom_attribute.py,sha256=G9TIK8YV7Lb-gcO5bq3s_L3eQ26T1VMlIEqx6TOMxOE,4026
+pulumi_vsphere/get_datacenter.py,sha256=bIaPX9gShWKO4CgmCegTe33vAXr1WxK_eOQkuoSuJ7U,3783
+pulumi_vsphere/get_datastore.py,sha256=0qqQSyxsZqHPvKRAdLyQiAzV0H0GGNt4pvP6-LRYCKk,4246
+pulumi_vsphere/get_datastore_cluster.py,sha256=KRGXlG7kwP4Fezkp2bMtA2qsPGdQ_RY18u71W7TXovc,4617
+pulumi_vsphere/get_distributed_virtual_switch.py,sha256=Jr2y7NNMo382OJWvfMgle3CORxqsH3Qha30Xqvz8x54,5460
+pulumi_vsphere/get_dynamic.py,sha256=xlsRCB7kOr8ftDOS8GPeAz60pLMVw54eRvj3Ugfi2lo,5060
+pulumi_vsphere/get_folder.py,sha256=rxGC64T3C92RbvMQ9Z-IVlCCBCxb5BsuDtiZch2eNWk,3293
+pulumi_vsphere/get_host.py,sha256=BmsRM9SmWtB1CiRZ4jr7aeT7QrarMzJIoBvu6bE1WPI,4714
+pulumi_vsphere/get_host_pci_device.py,sha256=Ace5Q06YBlWk2X95GEvJynhCMYpRLVdl0jezMSDMlRE,6278
+pulumi_vsphere/get_host_thumbprint.py,sha256=8a8-4XbI-asdRDOL3m0bXdT_L3_7_Xjj-WToqKVGKck,4619
+pulumi_vsphere/get_license.py,sha256=u6dgMwC-9lmUxsOPzNGYqMmt-OSMv2SMNLhBzLKR2Jw,4908
+pulumi_vsphere/get_network.py,sha256=G6k5EQdWR-V3Rb4dNAup-keMSXAq0YUB9krPUa3AUiE,6653
+pulumi_vsphere/get_ovf_vm_template.py,sha256=azMZY-3OwaKd45BWaLVsfFfuqIQ8MzAoP0vxXqzwaDs,32683
+pulumi_vsphere/get_policy.py,sha256=rgElQTc23hv8qCnhawqC6av1-hjJp-FDPGujzKeyLVY,2901
+pulumi_vsphere/get_resource_pool.py,sha256=S-LeJAZl7siOkcvDvuWfN3u0kruBCyfmrn3NWlWYOA4,4929
+pulumi_vsphere/get_role.py,sha256=HBgcko7Q5EJ_lEoPQB6WU3dEKqln7g3La5lWb7ESeXA,4849
+pulumi_vsphere/get_tag.py,sha256=pXepTuGDpEJXQzs5VjJ_aPBXuGH70MUkIPCz73H_r8c,4336
+pulumi_vsphere/get_tag_category.py,sha256=Mel-2IlaFrmrXxPqYMtZbgvsBj4gfcw9YVB31V8Qxho,4712
+pulumi_vsphere/get_vapp_container.py,sha256=2KvZwChpnlLFnwf4ptsSkAm9goRYOq4gyjdvbCdgDgE,3971
+pulumi_vsphere/get_virtual_machine.py,sha256=VOWLWMf6J2s4Q8OE1EzCf4XOv_lvM3qnTVU2kUTymSI,54543
+pulumi_vsphere/get_vmfs_disks.py,sha256=kpn-RyFbph-fL_s6JWrUqi8O1shmsXjN-PNHD_H8XC0,5979
+pulumi_vsphere/ha_vm_override.py,sha256=S0HYcy8yE_y8W8CkEdQrA4GWV5AxiV1FHViYn_0SzME,68305
+pulumi_vsphere/host.py,sha256=-aTRvzaw7Cv7t0CzemNK95pQmEOoFPHdhzDFWwydDsE,46637
+pulumi_vsphere/host_port_group.py,sha256=VfgU3kZdg70593ePeJS31PJWN44A3hT9SlawEQSBsZk,61071
+pulumi_vsphere/host_virtual_switch.py,sha256=Ir-QsgaTX9xAuSkgcMptoTtKaCUMdN67RxOOZCBBYKs,71672
+pulumi_vsphere/license.py,sha256=7pMQc1e9n2glPe9kr9_n-swD5NTfd4o-ngfJbG5pmSw,13821
+pulumi_vsphere/nas_datastore.py,sha256=ObM54hOnCNnzLF3ACL1x62p6oc6FSIs_VcY3W0WGiKs,54320
+pulumi_vsphere/outputs.py,sha256=3mwFlGfnEGdaZiTPaTb_qYnazYhtuvowuX5S33iJ-gc,115644
+pulumi_vsphere/provider.py,sha256=my66KhhSRhY9imJ2QGGg1g0SGl_ZW0ZNI3xGAToUz2Q,23725
+pulumi_vsphere/pulumi-plugin.json,sha256=GPkzWdIfUGLp-CeSzzIZ8ksrThmeZnOYMcpaT3JCpTU,44
+pulumi_vsphere/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pulumi_vsphere/resource_pool.py,sha256=rudeCOIOrwXDyAaGfalo3p6djzPY4LQfPF1QRLgsOBs,57659
+pulumi_vsphere/role.py,sha256=LITxepaKskXOaWnWTd48QVPr1Wz2my14kfoAiOZGxcg,9938
+pulumi_vsphere/storage_drs_vm_override.py,sha256=W7zw6ANsjYMgejPsnRm2B7mpUy1Tw9ET4jI0UO3zpyw,24252
+pulumi_vsphere/tag.py,sha256=dZ7xCYnt5rGi-QF5cLmFA3rPHFqWBw2kFGkq-HNZiBo,11444
+pulumi_vsphere/tag_category.py,sha256=2_OauoVtgbZLvnEq9eXyFTCaCNGDoG4FBQw6prSPlLY,17039
+pulumi_vsphere/vapp_container.py,sha256=7VZcj2gh6TQhXS61JSGpN9kUlAk_xg_29iH3XTCi3BM,55220
+pulumi_vsphere/vapp_entity.py,sha256=mKhUED5wyYRZ88AM2NnWRac8i4Bp8gIfpJfzSgKA2O4,34283
+pulumi_vsphere/virtual_disk.py,sha256=YsM3LSD-ra2blUhvartAkSQcDr5L45v3DrP0dl_2nZg,31552
+pulumi_vsphere/virtual_machine.py,sha256=xTY2KdzPCUJupXT_PaPQDY14f9LCqVLVyYNBdeKH_gE,305233
+pulumi_vsphere/virtual_machine_snapshot.py,sha256=glAxd2gLIe1CtsxOP9fIC_KMiZw45pKw1mlLYuPzR1Q,26262
+pulumi_vsphere/vm_storage_policy.py,sha256=8rh0imD6bd1UWabLEQ9yUiJ4gXhfF-Wl2nRvRiXWiMg,12553
+pulumi_vsphere/vmfs_datastore.py,sha256=eWY7B4ujke3MFYBfomuj-SOdxjFRZ1p5RTjjXsOZhTc,38738
+pulumi_vsphere/vnic.py,sha256=Ea4XM2EKccKh6DoWvMHbrrSp5mNQmQHh2zeykAm8aOM,29155
+pulumi_vsphere/config/__init__.py,sha256=cfY0smRZD3fDVc93ZIAxEl_IM2pynmXB52n3Ahzi030,285
+pulumi_vsphere/config/vars.py,sha256=47kimRKypQEoC8MVkn6ojVd39X_o1bTgG9X7-aMHiPY,3220
+pulumi_vsphere-4.9.0a1698198425.dist-info/METADATA,sha256=QWBFsyDyeAqtoMTFNkEPRpYby1T7yjClvUcysL260C8,4957
+pulumi_vsphere-4.9.0a1698198425.dist-info/WHEEL,sha256=yQN5g4mg4AybRjkgi-9yy4iQEFibGQmlz78Pik5Or-A,92
+pulumi_vsphere-4.9.0a1698198425.dist-info/top_level.txt,sha256=00BIE8zaYtdsw0_tBfXR8E5sTs3lRnwlcZ6lUdu4loI,15
+pulumi_vsphere-4.9.0a1698198425.dist-info/RECORD,,