pulumi-gcp 8.3.1a1727284265__py3-none-any.whl → 8.4.0a1727795436__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. pulumi_gcp/__init__.py +24 -0
  2. pulumi_gcp/apigee/nat_address.py +155 -2
  3. pulumi_gcp/bigquery/_inputs.py +102 -0
  4. pulumi_gcp/bigquery/outputs.py +83 -0
  5. pulumi_gcp/bigquery/table.py +47 -0
  6. pulumi_gcp/cloudrun/_inputs.py +26 -0
  7. pulumi_gcp/cloudrun/outputs.py +33 -0
  8. pulumi_gcp/cloudrun/service.py +76 -0
  9. pulumi_gcp/cloudrunv2/_inputs.py +65 -9
  10. pulumi_gcp/cloudrunv2/outputs.py +73 -8
  11. pulumi_gcp/cloudrunv2/service.py +64 -0
  12. pulumi_gcp/compute/__init__.py +3 -0
  13. pulumi_gcp/compute/_inputs.py +1941 -2
  14. pulumi_gcp/compute/get_region_instance_group_manager.py +438 -0
  15. pulumi_gcp/compute/get_router_nat.py +11 -1
  16. pulumi_gcp/compute/instance.py +7 -7
  17. pulumi_gcp/compute/instance_from_machine_image.py +7 -7
  18. pulumi_gcp/compute/instance_from_template.py +7 -7
  19. pulumi_gcp/compute/interconnect.py +76 -64
  20. pulumi_gcp/compute/outputs.py +4637 -2640
  21. pulumi_gcp/compute/region_commitment.py +47 -0
  22. pulumi_gcp/compute/region_network_firewall_policy_with_rules.py +835 -0
  23. pulumi_gcp/compute/router_nat.py +56 -2
  24. pulumi_gcp/compute/router_nat_address.py +514 -0
  25. pulumi_gcp/compute/subnetwork.py +14 -14
  26. pulumi_gcp/container/_inputs.py +222 -0
  27. pulumi_gcp/container/outputs.py +279 -2
  28. pulumi_gcp/logging/__init__.py +1 -0
  29. pulumi_gcp/logging/log_scope.py +492 -0
  30. pulumi_gcp/looker/_inputs.py +157 -0
  31. pulumi_gcp/looker/instance.py +143 -0
  32. pulumi_gcp/looker/outputs.py +136 -0
  33. pulumi_gcp/networkconnectivity/_inputs.py +72 -3
  34. pulumi_gcp/networkconnectivity/outputs.py +51 -3
  35. pulumi_gcp/networkconnectivity/spoke.py +310 -0
  36. pulumi_gcp/networksecurity/security_profile.py +2 -2
  37. pulumi_gcp/privilegedaccessmanager/__init__.py +1 -0
  38. pulumi_gcp/privilegedaccessmanager/get_entitlement.py +219 -0
  39. pulumi_gcp/privilegedaccessmanager/outputs.py +312 -0
  40. pulumi_gcp/pulumi-plugin.json +1 -1
  41. pulumi_gcp/secretmanager/__init__.py +2 -0
  42. pulumi_gcp/secretmanager/get_regional_secret_version.py +2 -2
  43. pulumi_gcp/secretmanager/get_regional_secret_version_access.py +188 -0
  44. pulumi_gcp/secretmanager/get_regional_secrets.py +156 -0
  45. pulumi_gcp/secretmanager/outputs.py +265 -0
  46. {pulumi_gcp-8.3.1a1727284265.dist-info → pulumi_gcp-8.4.0a1727795436.dist-info}/METADATA +1 -1
  47. {pulumi_gcp-8.3.1a1727284265.dist-info → pulumi_gcp-8.4.0a1727795436.dist-info}/RECORD +49 -42
  48. {pulumi_gcp-8.3.1a1727284265.dist-info → pulumi_gcp-8.4.0a1727795436.dist-info}/WHEEL +0 -0
  49. {pulumi_gcp-8.3.1a1727284265.dist-info → pulumi_gcp-8.4.0a1727795436.dist-info}/top_level.txt +0 -0
@@ -63,6 +63,7 @@ __all__ = [
  'ServiceTemplateContainerStartupProbeHttpGetHttpHeader',
  'ServiceTemplateContainerStartupProbeTcpSocket',
  'ServiceTemplateContainerVolumeMount',
+ 'ServiceTemplateNodeSelector',
  'ServiceTemplateScaling',
  'ServiceTemplateServiceMesh',
  'ServiceTemplateVolume',
@@ -120,6 +121,7 @@ __all__ = [
  'GetServiceTemplateContainerStartupProbeHttpGetHttpHeaderResult',
  'GetServiceTemplateContainerStartupProbeTcpSocketResult',
  'GetServiceTemplateContainerVolumeMountResult',
+ 'GetServiceTemplateNodeSelectorResult',
  'GetServiceTemplateScalingResult',
  'GetServiceTemplateServiceMeshResult',
  'GetServiceTemplateVolumeResult',
@@ -2037,6 +2039,8 @@ class ServiceTemplate(dict):
  suggest = "execution_environment"
  elif key == "maxInstanceRequestConcurrency":
  suggest = "max_instance_request_concurrency"
+ elif key == "nodeSelector":
+ suggest = "node_selector"
  elif key == "serviceAccount":
  suggest = "service_account"
  elif key == "serviceMesh":
@@ -2064,6 +2068,7 @@ class ServiceTemplate(dict):
  execution_environment: Optional[str] = None,
  labels: Optional[Mapping[str, str]] = None,
  max_instance_request_concurrency: Optional[int] = None,
+ node_selector: Optional['outputs.ServiceTemplateNodeSelector'] = None,
  revision: Optional[str] = None,
  scaling: Optional['outputs.ServiceTemplateScaling'] = None,
  service_account: Optional[str] = None,
@@ -2088,6 +2093,8 @@ class ServiceTemplate(dict):
  All system labels in v1 now have a corresponding field in v2 RevisionTemplate.
  :param int max_instance_request_concurrency: Sets the maximum number of requests that each serving instance can receive.
  If not specified or 0, defaults to 80 when requested CPU >= 1 and defaults to 1 when requested CPU < 1.
+ :param 'ServiceTemplateNodeSelectorArgs' node_selector: Node Selector describes the hardware requirements of the resources.
+ Structure is documented below.
  :param str revision: The unique name for the revision. If this field is omitted, it will be automatically generated based on the Service name.
  :param 'ServiceTemplateScalingArgs' scaling: Scaling settings for this Revision.
  Structure is documented below.
@@ -2114,6 +2121,8 @@ class ServiceTemplate(dict):
  pulumi.set(__self__, "labels", labels)
  if max_instance_request_concurrency is not None:
  pulumi.set(__self__, "max_instance_request_concurrency", max_instance_request_concurrency)
+ if node_selector is not None:
+ pulumi.set(__self__, "node_selector", node_selector)
  if revision is not None:
  pulumi.set(__self__, "revision", revision)
  if scaling is not None:
@@ -2188,6 +2197,15 @@ class ServiceTemplate(dict):
  """
  return pulumi.get(self, "max_instance_request_concurrency")

+ @property
+ @pulumi.getter(name="nodeSelector")
+ def node_selector(self) -> Optional['outputs.ServiceTemplateNodeSelector']:
+ """
+ Node Selector describes the hardware requirements of the resources.
+ Structure is documented below.
+ """
+ return pulumi.get(self, "node_selector")
+
  @property
  @pulumi.getter
  def revision(self) -> Optional[str]:
@@ -2927,7 +2945,7 @@ class ServiceTemplateContainerResources(dict):
  """
  :param bool cpu_idle: Determines whether CPU is only allocated during requests. True by default if the parent `resources` field is not set. However, if
  `resources` is set, this field must be explicitly set to true to preserve the default behavior.
- :param Mapping[str, str] limits: Only memory and CPU are supported. Use key `cpu` for CPU limit and `memory` for memory limit. Note: The only supported values for CPU are '1', '2', '4', and '8'. Setting 4 CPU requires at least 2Gi of memory. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go
+ :param Mapping[str, str] limits: Only memory, CPU, and nvidia.com/gpu are supported. Use key `cpu` for CPU limit, `memory` for memory limit, `nvidia.com/gpu` for gpu limit. Note: The only supported values for CPU are '1', '2', '4', and '8'. Setting 4 CPU requires at least 2Gi of memory. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go
  :param bool startup_cpu_boost: Determines whether CPU should be boosted on startup of a new container instance above the requested CPU threshold, this can help reduce cold-start latency.
  """
  if cpu_idle is not None:
@@ -2950,7 +2968,7 @@ class ServiceTemplateContainerResources(dict):
  @pulumi.getter
  def limits(self) -> Optional[Mapping[str, str]]:
  """
- Only memory and CPU are supported. Use key `cpu` for CPU limit and `memory` for memory limit. Note: The only supported values for CPU are '1', '2', '4', and '8'. Setting 4 CPU requires at least 2Gi of memory. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go
+ Only memory, CPU, and nvidia.com/gpu are supported. Use key `cpu` for CPU limit, `memory` for memory limit, `nvidia.com/gpu` for gpu limit. Note: The only supported values for CPU are '1', '2', '4', and '8'. Setting 4 CPU requires at least 2Gi of memory. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go
  """
  return pulumi.get(self, "limits")

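The widened `limits` documentation above is the user-facing half of the Cloud Run v2 GPU support added in 8.4.0. A minimal sketch, assuming the corresponding `ServiceTemplateContainerResourcesArgs` input type in `cloudrunv2/_inputs.py` (changed in this release but not shown in this excerpt), of how the new key would be supplied from a Pulumi program:

```python
import pulumi_gcp as gcp

# Sketch of the resources block for a GPU-backed container, using the keys the
# docstring above documents. Values are illustrative; the generated
# "Cloudrunv2 Service Gpu" example further down this diff passes the same shape
# as a plain dict and spells the GPU key as "nvidia_com_gpu".
resources = gcp.cloudrunv2.ServiceTemplateContainerResourcesArgs(
    limits={
        "cpu": "4",             # only '1', '2', '4', '8' are accepted for CPU
        "memory": "16Gi",       # 4 CPUs require at least 2Gi of memory
        "nvidia.com/gpu": "1",  # the new GPU limit key documented above
    },
    startup_cpu_boost=True,
)
```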
@@ -3285,6 +3303,28 @@ class ServiceTemplateContainerVolumeMount(dict):
  return pulumi.get(self, "name")


+ @pulumi.output_type
+ class ServiceTemplateNodeSelector(dict):
+ def __init__(__self__, *,
+ accelerator: str):
+ """
+ :param str accelerator: The GPU to attach to an instance. See https://cloud.google.com/run/docs/configuring/services/gpu for configuring GPU.
+
+ - - -
+ """
+ pulumi.set(__self__, "accelerator", accelerator)
+
+ @property
+ @pulumi.getter
+ def accelerator(self) -> str:
+ """
+ The GPU to attach to an instance. See https://cloud.google.com/run/docs/configuring/services/gpu for configuring GPU.
+
+ - - -
+ """
+ return pulumi.get(self, "accelerator")
+
+
  @pulumi.output_type
  class ServiceTemplateScaling(dict):
  @staticmethod
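The `ServiceTemplateNodeSelector` output type above is mirrored by the `ServiceTemplateNodeSelectorArgs` input referenced in the `:param` docs earlier in this diff. A hedged sketch of wiring it into a service template, with the accelerator value taken from the generated GPU example further down:

```python
import pulumi_gcp as gcp

# Sketch only: pair the node selector with the GPU limit shown above to run a
# revision on NVIDIA L4 hardware. The full generated example
# ("Cloudrunv2 Service Gpu") appears in the service.py hunks below.
template = gcp.cloudrunv2.ServiceTemplateArgs(
    node_selector=gcp.cloudrunv2.ServiceTemplateNodeSelectorArgs(
        accelerator="nvidia-l4",  # GPU model; see cloud.google.com/run/docs/configuring/services/gpu
    ),
    scaling=gcp.cloudrunv2.ServiceTemplateScalingArgs(
        max_instance_count=1,
    ),
)
```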
@@ -3341,8 +3381,6 @@ class ServiceTemplateServiceMesh(dict):
  mesh: Optional[str] = None):
  """
  :param str mesh: The Mesh resource name. For more information see https://cloud.google.com/service-mesh/docs/reference/network-services/rest/v1/projects.locations.meshes#resource:-mesh.
-
- - - -
  """
  if mesh is not None:
  pulumi.set(__self__, "mesh", mesh)
@@ -3352,8 +3390,6 @@ class ServiceTemplateServiceMesh(dict):
  def mesh(self) -> Optional[str]:
  """
  The Mesh resource name. For more information see https://cloud.google.com/service-mesh/docs/reference/network-services/rest/v1/projects.locations.meshes#resource:-mesh.
-
- - - -
  """
  return pulumi.get(self, "mesh")

@@ -5398,6 +5434,7 @@ class GetServiceTemplateResult(dict):
  execution_environment: str,
  labels: Mapping[str, str],
  max_instance_request_concurrency: int,
+ node_selectors: Sequence['outputs.GetServiceTemplateNodeSelectorResult'],
  revision: str,
  scalings: Sequence['outputs.GetServiceTemplateScalingResult'],
  service_account: str,
@@ -5423,6 +5460,7 @@ class GetServiceTemplateResult(dict):
  All system labels in v1 now have a corresponding field in v2 RevisionTemplate.
  :param int max_instance_request_concurrency: Sets the maximum number of requests that each serving instance can receive.
  If not specified or 0, defaults to 80 when requested CPU >= 1 and defaults to 1 when requested CPU < 1.
+ :param Sequence['GetServiceTemplateNodeSelectorArgs'] node_selectors: Node Selector describes the hardware requirements of the resources.
  :param str revision: The unique name for the revision. If this field is omitted, it will be automatically generated based on the Service name.
  :param Sequence['GetServiceTemplateScalingArgs'] scalings: Scaling settings for this Revision.
  :param str service_account: Email address of the IAM service account associated with the revision of the service. The service account represents the identity of the running revision, and determines what permissions the revision has. If not provided, the revision will use the project's default service account.
@@ -5440,6 +5478,7 @@ class GetServiceTemplateResult(dict):
  pulumi.set(__self__, "execution_environment", execution_environment)
  pulumi.set(__self__, "labels", labels)
  pulumi.set(__self__, "max_instance_request_concurrency", max_instance_request_concurrency)
+ pulumi.set(__self__, "node_selectors", node_selectors)
  pulumi.set(__self__, "revision", revision)
  pulumi.set(__self__, "scalings", scalings)
  pulumi.set(__self__, "service_account", service_account)
@@ -5507,6 +5546,14 @@ class GetServiceTemplateResult(dict):
  """
  return pulumi.get(self, "max_instance_request_concurrency")

+ @property
+ @pulumi.getter(name="nodeSelectors")
+ def node_selectors(self) -> Sequence['outputs.GetServiceTemplateNodeSelectorResult']:
+ """
+ Node Selector describes the hardware requirements of the resources.
+ """
+ return pulumi.get(self, "node_selectors")
+
  @property
  @pulumi.getter
  def revision(self) -> str:
@@ -6054,7 +6101,7 @@ class GetServiceTemplateContainerResourceResult(dict):
  """
  :param bool cpu_idle: Determines whether CPU is only allocated during requests. True by default if the parent 'resources' field is not set. However, if
  'resources' is set, this field must be explicitly set to true to preserve the default behavior.
- :param Mapping[str, str] limits: Only memory and CPU are supported. Use key 'cpu' for CPU limit and 'memory' for memory limit. Note: The only supported values for CPU are '1', '2', '4', and '8'. Setting 4 CPU requires at least 2Gi of memory. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go
+ :param Mapping[str, str] limits: Only memory, CPU, and nvidia.com/gpu are supported. Use key 'cpu' for CPU limit, 'memory' for memory limit, 'nvidia.com/gpu' for gpu limit. Note: The only supported values for CPU are '1', '2', '4', and '8'. Setting 4 CPU requires at least 2Gi of memory. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go
  :param bool startup_cpu_boost: Determines whether CPU should be boosted on startup of a new container instance above the requested CPU threshold, this can help reduce cold-start latency.
  """
  pulumi.set(__self__, "cpu_idle", cpu_idle)
@@ -6074,7 +6121,7 @@ class GetServiceTemplateContainerResourceResult(dict):
  @pulumi.getter
  def limits(self) -> Mapping[str, str]:
  """
- Only memory and CPU are supported. Use key 'cpu' for CPU limit and 'memory' for memory limit. Note: The only supported values for CPU are '1', '2', '4', and '8'. Setting 4 CPU requires at least 2Gi of memory. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go
+ Only memory, CPU, and nvidia.com/gpu are supported. Use key 'cpu' for CPU limit, 'memory' for memory limit, 'nvidia.com/gpu' for gpu limit. Note: The only supported values for CPU are '1', '2', '4', and '8'. Setting 4 CPU requires at least 2Gi of memory. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go
  """
  return pulumi.get(self, "limits")

@@ -6326,6 +6373,24 @@ class GetServiceTemplateContainerVolumeMountResult(dict):
  return pulumi.get(self, "name")


+ @pulumi.output_type
+ class GetServiceTemplateNodeSelectorResult(dict):
+ def __init__(__self__, *,
+ accelerator: str):
+ """
+ :param str accelerator: The GPU to attach to an instance. See https://cloud.google.com/run/docs/configuring/services/gpu for configuring GPU.
+ """
+ pulumi.set(__self__, "accelerator", accelerator)
+
+ @property
+ @pulumi.getter
+ def accelerator(self) -> str:
+ """
+ The GPU to attach to an instance. See https://cloud.google.com/run/docs/configuring/services/gpu for configuring GPU.
+ """
+ return pulumi.get(self, "accelerator")
+
+
  @pulumi.output_type
  class GetServiceTemplateScalingResult(dict):
  def __init__(__self__, *,
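On the read side, `GetServiceTemplateNodeSelectorResult` surfaces the accelerator through the `node_selectors` list added to `GetServiceTemplateResult` earlier in this diff. A sketch of consuming it, where the outer `templates` property name on the `get_service` result is an assumption (that part of the generated SDK is not shown in this excerpt):

```python
import pulumi
import pulumi_gcp as gcp

# The `node_selectors` and `accelerator` names are confirmed by the hunks above;
# `templates` on the get_service result is assumed from the SDK's usual
# pluralization of nested data-source outputs.
svc = gcp.cloudrunv2.get_service(name="cloudrun-service", location="us-central1")
accelerators = [
    selector.accelerator
    for template in svc.templates
    for selector in template.node_selectors
]
pulumi.export("accelerators", accelerators)
```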
@@ -1199,6 +1199,38 @@ class Service(pulumi.CustomResource):
  },
  })
  ```
+ ### Cloudrunv2 Service Gpu
+
+ ```python
+ import pulumi
+ import pulumi_gcp as gcp
+
+ default = gcp.cloudrunv2.Service("default",
+ name="cloudrun-service",
+ location="us-central1",
+ deletion_protection=False,
+ ingress="INGRESS_TRAFFIC_ALL",
+ launch_stage="BETA",
+ template={
+ "containers": [{
+ "image": "us-docker.pkg.dev/cloudrun/container/hello",
+ "resources": {
+ "limits": {
+ "cpu": "4",
+ "memory": "16Gi",
+ "nvidia_com_gpu": "1",
+ },
+ "startup_cpu_boost": True,
+ },
+ }],
+ "node_selector": {
+ "accelerator": "nvidia-l4",
+ },
+ "scaling": {
+ "max_instance_count": 1,
+ },
+ })
+ ```
  ### Cloudrunv2 Service Probes

  ```python
@@ -1690,6 +1722,38 @@ class Service(pulumi.CustomResource):
  },
  })
  ```
+ ### Cloudrunv2 Service Gpu
+
+ ```python
+ import pulumi
+ import pulumi_gcp as gcp
+
+ default = gcp.cloudrunv2.Service("default",
+ name="cloudrun-service",
+ location="us-central1",
+ deletion_protection=False,
+ ingress="INGRESS_TRAFFIC_ALL",
+ launch_stage="BETA",
+ template={
+ "containers": [{
+ "image": "us-docker.pkg.dev/cloudrun/container/hello",
+ "resources": {
+ "limits": {
+ "cpu": "4",
+ "memory": "16Gi",
+ "nvidia_com_gpu": "1",
+ },
+ "startup_cpu_boost": True,
+ },
+ }],
+ "node_selector": {
+ "accelerator": "nvidia-l4",
+ },
+ "scaling": {
+ "max_instance_count": 1,
+ },
+ })
+ ```
  ### Cloudrunv2 Service Probes

  ```python
@@ -69,6 +69,7 @@ from .get_region_backend_service_iam_policy import *
  from .get_region_disk import *
  from .get_region_disk_iam_policy import *
  from .get_region_instance_group import *
+ from .get_region_instance_group_manager import *
  from .get_region_instance_template import *
  from .get_region_network_endpoint_group import *
  from .get_region_ssl_certificate import *
@@ -163,6 +164,7 @@ from .region_network_endpoint_group import *
  from .region_network_firewall_policy import *
  from .region_network_firewall_policy_association import *
  from .region_network_firewall_policy_rule import *
+ from .region_network_firewall_policy_with_rules import *
  from .region_per_instance_config import *
  from .region_security_policy import *
  from .region_security_policy_rule import *
@@ -179,6 +181,7 @@ from .route import *
  from .router import *
  from .router_interface import *
  from .router_nat import *
+ from .router_nat_address import *
  from .router_peer import *
  from .router_route_policy import *
  from .router_status import *
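The three new imports correspond to `get_region_instance_group_manager.py`, `region_network_firewall_policy_with_rules.py`, and `router_nat_address.py` in the file listing above. A hedged sketch of the new data source; the argument and attribute names are assumptions based on the matching `google_compute_region_instance_group_manager` Terraform data source and are not confirmed by this diff:

```python
import pulumi
import pulumi_gcp as gcp

# Assumption: the new data source can be looked up by name and region, mirroring
# google_compute_region_instance_group_manager. Verify the argument and attribute
# names against the generated get_region_instance_group_manager.py before use.
mig = gcp.compute.get_region_instance_group_manager(
    name="my-regional-mig",  # hypothetical manager name
    region="us-central1",
)
pulumi.export("instance_group", mig.instance_group)
```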