pulumi_gcp-7.17.0a1712402830-py3-none-any.whl → pulumi_gcp-7.18.0-py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
Files changed (66)
  1. pulumi_gcp/__init__.py +27 -0
  2. pulumi_gcp/apphub/__init__.py +1 -0
  3. pulumi_gcp/apphub/get_application.py +220 -0
  4. pulumi_gcp/apphub/outputs.py +214 -0
  5. pulumi_gcp/applicationintegration/__init__.py +10 -0
  6. pulumi_gcp/applicationintegration/_inputs.py +119 -0
  7. pulumi_gcp/applicationintegration/client.py +574 -0
  8. pulumi_gcp/applicationintegration/outputs.py +122 -0
  9. pulumi_gcp/bigquery/_inputs.py +16 -0
  10. pulumi_gcp/bigquery/outputs.py +14 -0
  11. pulumi_gcp/bigquery/routine.py +98 -0
  12. pulumi_gcp/billing/project_info.py +4 -4
  13. pulumi_gcp/cloudquota/__init__.py +2 -0
  14. pulumi_gcp/cloudquota/_inputs.py +131 -0
  15. pulumi_gcp/cloudquota/outputs.py +118 -0
  16. pulumi_gcp/cloudquota/s_quota_preference.py +777 -0
  17. pulumi_gcp/cloudrunv2/_inputs.py +73 -0
  18. pulumi_gcp/cloudrunv2/outputs.py +126 -0
  19. pulumi_gcp/compute/interconnect_attachment.py +64 -0
  20. pulumi_gcp/compute/network_endpoint.py +8 -0
  21. pulumi_gcp/compute/network_endpoint_list.py +8 -0
  22. pulumi_gcp/compute/region_backend_service.py +28 -0
  23. pulumi_gcp/compute/target_instance.py +4 -4
  24. pulumi_gcp/config/__init__.pyi +2 -0
  25. pulumi_gcp/config/vars.py +4 -0
  26. pulumi_gcp/container/_inputs.py +17 -1
  27. pulumi_gcp/container/cluster.py +47 -0
  28. pulumi_gcp/container/get_cluster.py +11 -1
  29. pulumi_gcp/container/outputs.py +27 -2
  30. pulumi_gcp/databasemigrationservice/connection_profile.py +6 -6
  31. pulumi_gcp/dataflow/flex_template_job.py +84 -112
  32. pulumi_gcp/dataplex/task.py +16 -16
  33. pulumi_gcp/dataproc/_inputs.py +71 -0
  34. pulumi_gcp/dataproc/get_metastore_service.py +11 -1
  35. pulumi_gcp/dataproc/metastore_service.py +120 -0
  36. pulumi_gcp/dataproc/outputs.py +128 -0
  37. pulumi_gcp/firebase/app_check_service_config.py +2 -2
  38. pulumi_gcp/firestore/backup_schedule.py +23 -23
  39. pulumi_gcp/firestore/field.py +4 -4
  40. pulumi_gcp/gkehub/membership_binding.py +6 -6
  41. pulumi_gcp/gkehub/membership_rbac_role_binding.py +4 -4
  42. pulumi_gcp/gkehub/namespace.py +4 -4
  43. pulumi_gcp/gkehub/scope_rbac_role_binding.py +4 -4
  44. pulumi_gcp/gkeonprem/v_mware_cluster.py +49 -0
  45. pulumi_gcp/iap/tunnel_dest_group.py +2 -2
  46. pulumi_gcp/kms/_inputs.py +46 -0
  47. pulumi_gcp/kms/crypto_key.py +54 -0
  48. pulumi_gcp/kms/crypto_key_version.py +54 -0
  49. pulumi_gcp/kms/get_kms_crypto_key.py +11 -1
  50. pulumi_gcp/kms/outputs.py +54 -0
  51. pulumi_gcp/looker/instance.py +20 -30
  52. pulumi_gcp/orgpolicy/policy.py +2 -2
  53. pulumi_gcp/provider.py +20 -0
  54. pulumi_gcp/pubsub/subscription.py +4 -4
  55. pulumi_gcp/redis/cluster.py +91 -0
  56. pulumi_gcp/storage/bucket.py +28 -0
  57. pulumi_gcp/storage/get_bucket.py +17 -2
  58. pulumi_gcp/vertex/__init__.py +1 -0
  59. pulumi_gcp/vertex/_inputs.py +169 -0
  60. pulumi_gcp/vertex/ai_deployment_resource_pool.py +477 -0
  61. pulumi_gcp/vertex/outputs.py +194 -0
  62. pulumi_gcp/vpcaccess/connector.py +77 -28
  63. {pulumi_gcp-7.17.0a1712402830.dist-info → pulumi_gcp-7.18.0.dist-info}/METADATA +1 -1
  64. {pulumi_gcp-7.17.0a1712402830.dist-info → pulumi_gcp-7.18.0.dist-info}/RECORD +66 -58
  65. {pulumi_gcp-7.17.0a1712402830.dist-info → pulumi_gcp-7.18.0.dist-info}/WHEEL +0 -0
  66. {pulumi_gcp-7.17.0a1712402830.dist-info → pulumi_gcp-7.18.0.dist-info}/top_level.txt +0 -0
pulumi_gcp/vertex/outputs.py

@@ -12,6 +12,9 @@ from . import outputs
 
 __all__ = [
     'AiDatasetEncryptionSpec',
+    'AiDeploymentResourcePoolDedicatedResources',
+    'AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpec',
+    'AiDeploymentResourcePoolDedicatedResourcesMachineSpec',
     'AiEndpointDeployedModel',
     'AiEndpointDeployedModelAutomaticResource',
     'AiEndpointDeployedModelDedicatedResource',
@@ -106,6 +109,197 @@ class AiDatasetEncryptionSpec(dict):
         return pulumi.get(self, "kms_key_name")
 
 
+@pulumi.output_type
+class AiDeploymentResourcePoolDedicatedResources(dict):
+    @staticmethod
+    def __key_warning(key: str):
+        suggest = None
+        if key == "machineSpec":
+            suggest = "machine_spec"
+        elif key == "minReplicaCount":
+            suggest = "min_replica_count"
+        elif key == "autoscalingMetricSpecs":
+            suggest = "autoscaling_metric_specs"
+        elif key == "maxReplicaCount":
+            suggest = "max_replica_count"
+
+        if suggest:
+            pulumi.log.warn(f"Key '{key}' not found in AiDeploymentResourcePoolDedicatedResources. Access the value via the '{suggest}' property getter instead.")
+
+    def __getitem__(self, key: str) -> Any:
+        AiDeploymentResourcePoolDedicatedResources.__key_warning(key)
+        return super().__getitem__(key)
+
+    def get(self, key: str, default = None) -> Any:
+        AiDeploymentResourcePoolDedicatedResources.__key_warning(key)
+        return super().get(key, default)
+
+    def __init__(__self__, *,
+                 machine_spec: 'outputs.AiDeploymentResourcePoolDedicatedResourcesMachineSpec',
+                 min_replica_count: int,
+                 autoscaling_metric_specs: Optional[Sequence['outputs.AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpec']] = None,
+                 max_replica_count: Optional[int] = None):
+        """
+        :param 'AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs' machine_spec: The specification of a single machine used by the prediction
+               Structure is documented below.
+        :param int min_replica_count: The minimum number of machine replicas this DeployedModel will be always deployed on. This value must be greater than or equal to 1. If traffic against the DeployedModel increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed.
+        :param Sequence['AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs'] autoscaling_metric_specs: A list of the metric specifications that overrides a resource utilization metric.
+               Structure is documented below.
+        :param int max_replica_count: The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, will use min_replica_count as the default value. The value of this field impacts the charge against Vertex CPU and GPU quotas. Specifically, you will be charged for max_replica_count * number of cores in the selected machine type) and (max_replica_count * number of GPUs per replica in the selected machine type).
+        """
+        pulumi.set(__self__, "machine_spec", machine_spec)
+        pulumi.set(__self__, "min_replica_count", min_replica_count)
+        if autoscaling_metric_specs is not None:
+            pulumi.set(__self__, "autoscaling_metric_specs", autoscaling_metric_specs)
+        if max_replica_count is not None:
+            pulumi.set(__self__, "max_replica_count", max_replica_count)
+
+    @property
+    @pulumi.getter(name="machineSpec")
+    def machine_spec(self) -> 'outputs.AiDeploymentResourcePoolDedicatedResourcesMachineSpec':
+        """
+        The specification of a single machine used by the prediction
+        Structure is documented below.
+        """
+        return pulumi.get(self, "machine_spec")
+
+    @property
+    @pulumi.getter(name="minReplicaCount")
+    def min_replica_count(self) -> int:
+        """
+        The minimum number of machine replicas this DeployedModel will be always deployed on. This value must be greater than or equal to 1. If traffic against the DeployedModel increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed.
+        """
+        return pulumi.get(self, "min_replica_count")
+
+    @property
+    @pulumi.getter(name="autoscalingMetricSpecs")
+    def autoscaling_metric_specs(self) -> Optional[Sequence['outputs.AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpec']]:
+        """
+        A list of the metric specifications that overrides a resource utilization metric.
+        Structure is documented below.
+        """
+        return pulumi.get(self, "autoscaling_metric_specs")
+
+    @property
+    @pulumi.getter(name="maxReplicaCount")
+    def max_replica_count(self) -> Optional[int]:
+        """
+        The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, will use min_replica_count as the default value. The value of this field impacts the charge against Vertex CPU and GPU quotas. Specifically, you will be charged for max_replica_count * number of cores in the selected machine type) and (max_replica_count * number of GPUs per replica in the selected machine type).
+        """
+        return pulumi.get(self, "max_replica_count")
+
+
+@pulumi.output_type
+class AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpec(dict):
+    @staticmethod
+    def __key_warning(key: str):
+        suggest = None
+        if key == "metricName":
+            suggest = "metric_name"
+
+        if suggest:
+            pulumi.log.warn(f"Key '{key}' not found in AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpec. Access the value via the '{suggest}' property getter instead.")
+
+    def __getitem__(self, key: str) -> Any:
+        AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpec.__key_warning(key)
+        return super().__getitem__(key)
+
+    def get(self, key: str, default = None) -> Any:
+        AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpec.__key_warning(key)
+        return super().get(key, default)
+
+    def __init__(__self__, *,
+                 metric_name: str,
+                 target: Optional[int] = None):
+        """
+        :param str metric_name: The resource metric name. Supported metrics: For Online Prediction: * `aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle` * `aiplatform.googleapis.com/prediction/online/cpu/utilization`
+        :param int target: The target resource utilization in percentage (1% - 100%) for the given metric; once the real usage deviates from the target by a certain percentage, the machine replicas change. The default value is 60 (representing 60%) if not provided.
+        """
+        pulumi.set(__self__, "metric_name", metric_name)
+        if target is not None:
+            pulumi.set(__self__, "target", target)
+
+    @property
+    @pulumi.getter(name="metricName")
+    def metric_name(self) -> str:
+        """
+        The resource metric name. Supported metrics: For Online Prediction: * `aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle` * `aiplatform.googleapis.com/prediction/online/cpu/utilization`
+        """
+        return pulumi.get(self, "metric_name")
+
+    @property
+    @pulumi.getter
+    def target(self) -> Optional[int]:
+        """
+        The target resource utilization in percentage (1% - 100%) for the given metric; once the real usage deviates from the target by a certain percentage, the machine replicas change. The default value is 60 (representing 60%) if not provided.
+        """
+        return pulumi.get(self, "target")
+
+
+@pulumi.output_type
+class AiDeploymentResourcePoolDedicatedResourcesMachineSpec(dict):
+    @staticmethod
+    def __key_warning(key: str):
+        suggest = None
+        if key == "acceleratorCount":
+            suggest = "accelerator_count"
+        elif key == "acceleratorType":
+            suggest = "accelerator_type"
+        elif key == "machineType":
+            suggest = "machine_type"
+
+        if suggest:
+            pulumi.log.warn(f"Key '{key}' not found in AiDeploymentResourcePoolDedicatedResourcesMachineSpec. Access the value via the '{suggest}' property getter instead.")
+
+    def __getitem__(self, key: str) -> Any:
+        AiDeploymentResourcePoolDedicatedResourcesMachineSpec.__key_warning(key)
+        return super().__getitem__(key)
+
+    def get(self, key: str, default = None) -> Any:
+        AiDeploymentResourcePoolDedicatedResourcesMachineSpec.__key_warning(key)
+        return super().get(key, default)
+
+    def __init__(__self__, *,
+                 accelerator_count: Optional[int] = None,
+                 accelerator_type: Optional[str] = None,
+                 machine_type: Optional[str] = None):
+        """
+        :param int accelerator_count: The number of accelerators to attach to the machine.
+        :param str accelerator_type: The type of accelerator(s) that may be attached to the machine as per accelerator_count. See possible values [here](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec#AcceleratorType).
+        :param str machine_type: The type of the machine. See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types).
+        """
+        if accelerator_count is not None:
+            pulumi.set(__self__, "accelerator_count", accelerator_count)
+        if accelerator_type is not None:
+            pulumi.set(__self__, "accelerator_type", accelerator_type)
+        if machine_type is not None:
+            pulumi.set(__self__, "machine_type", machine_type)
+
+    @property
+    @pulumi.getter(name="acceleratorCount")
+    def accelerator_count(self) -> Optional[int]:
+        """
+        The number of accelerators to attach to the machine.
+        """
+        return pulumi.get(self, "accelerator_count")
+
+    @property
+    @pulumi.getter(name="acceleratorType")
+    def accelerator_type(self) -> Optional[str]:
+        """
+        The type of accelerator(s) that may be attached to the machine as per accelerator_count. See possible values [here](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec#AcceleratorType).
+        """
+        return pulumi.get(self, "accelerator_type")
+
+    @property
+    @pulumi.getter(name="machineType")
+    def machine_type(self) -> Optional[str]:
+        """
+        The type of the machine. See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types).
+        """
+        return pulumi.get(self, "machine_type")
+
+
 @pulumi.output_type
 class AiEndpointDeployedModel(dict):
     @staticmethod
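
The output types above back the new vertex AiDeploymentResourcePool resource added in this release (pulumi_gcp/vertex/ai_deployment_resource_pool.py). A minimal usage sketch, assuming the resource accepts a dedicated_resources argument built from the matching Args classes named in the docstrings; the region, machine type, accelerator, and metric values are illustrative and not taken from this diff:

import pulumi_gcp as gcp

# Sketch only: argument classes are inferred from the Args names referenced in
# the docstrings above; concrete values are examples, not defaults.
pool = gcp.vertex.AiDeploymentResourcePool(
    "example-pool",
    region="us-central1",
    name="example-deployment-resource-pool",
    dedicated_resources=gcp.vertex.AiDeploymentResourcePoolDedicatedResourcesArgs(
        machine_spec=gcp.vertex.AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs(
            machine_type="n1-standard-4",
            accelerator_type="NVIDIA_TESLA_T4",
            accelerator_count=1,
        ),
        min_replica_count=1,   # must be >= 1
        max_replica_count=3,   # falls back to min_replica_count when omitted
        autoscaling_metric_specs=[
            gcp.vertex.AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs(
                metric_name="aiplatform.googleapis.com/prediction/online/cpu/utilization",
                target=70,     # 1-100; defaults to 60 when omitted
            ),
        ],
    ),
)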
pulumi_gcp/vpcaccess/connector.py

@@ -31,10 +31,17 @@ class ConnectorArgs:
         The set of arguments for constructing a Connector resource.
         :param pulumi.Input[str] ip_cidr_range: The range of internal addresses that follows RFC 4632 notation. Example: `10.132.0.0/28`.
         :param pulumi.Input[str] machine_type: Machine type of VM Instance underlying connector. Default is e2-micro
-        :param pulumi.Input[int] max_instances: Maximum value of instances in autoscaling group underlying the connector.
-        :param pulumi.Input[int] max_throughput: Maximum throughput of the connector in Mbps, must be greater than `min_throughput`. Default is 300.
-        :param pulumi.Input[int] min_instances: Minimum value of instances in autoscaling group underlying the connector.
-        :param pulumi.Input[int] min_throughput: Minimum throughput of the connector in Mbps. Default and min is 200.
+        :param pulumi.Input[int] max_instances: Maximum value of instances in autoscaling group underlying the connector. Value must be between 3 and 10, inclusive. Must be
+               higher than the value specified by min_instances.
+        :param pulumi.Input[int] max_throughput: Maximum throughput of the connector in Mbps, must be greater than `min_throughput`. Default is 300. Refers to the expected throughput
+               when using an e2-micro machine type. Value must be a multiple of 100 from 300 through 1000. Must be higher than the value specified by
+               min_throughput. If both max_throughput and max_instances are provided, max_instances takes precedence over max_throughput. The use of
+               max_throughput is discouraged in favor of max_instances.
+        :param pulumi.Input[int] min_instances: Minimum value of instances in autoscaling group underlying the connector. Value must be between 2 and 9, inclusive. Must be
+               lower than the value specified by max_instances.
+        :param pulumi.Input[int] min_throughput: Minimum throughput of the connector in Mbps. Default and min is 200. Refers to the expected throughput when using an e2-micro machine type.
+               Value must be a multiple of 100 from 200 through 900. Must be lower than the value specified by max_throughput. If both min_throughput and
+               min_instances are provided, min_instances takes precedence over min_throughput. The use of min_throughput is discouraged in favor of min_instances.
         :param pulumi.Input[str] name: The name of the resource (Max 25 characters).
 
 
@@ -97,7 +104,8 @@ class ConnectorArgs:
     @pulumi.getter(name="maxInstances")
     def max_instances(self) -> Optional[pulumi.Input[int]]:
         """
-        Maximum value of instances in autoscaling group underlying the connector.
+        Maximum value of instances in autoscaling group underlying the connector. Value must be between 3 and 10, inclusive. Must be
+        higher than the value specified by min_instances.
         """
         return pulumi.get(self, "max_instances")
 
@@ -109,7 +117,10 @@ class ConnectorArgs:
     @pulumi.getter(name="maxThroughput")
     def max_throughput(self) -> Optional[pulumi.Input[int]]:
         """
-        Maximum throughput of the connector in Mbps, must be greater than `min_throughput`. Default is 300.
+        Maximum throughput of the connector in Mbps, must be greater than `min_throughput`. Default is 300. Refers to the expected throughput
+        when using an e2-micro machine type. Value must be a multiple of 100 from 300 through 1000. Must be higher than the value specified by
+        min_throughput. If both max_throughput and max_instances are provided, max_instances takes precedence over max_throughput. The use of
+        max_throughput is discouraged in favor of max_instances.
         """
         return pulumi.get(self, "max_throughput")
 
@@ -121,7 +132,8 @@ class ConnectorArgs:
     @pulumi.getter(name="minInstances")
     def min_instances(self) -> Optional[pulumi.Input[int]]:
         """
-        Minimum value of instances in autoscaling group underlying the connector.
+        Minimum value of instances in autoscaling group underlying the connector. Value must be between 2 and 9, inclusive. Must be
+        lower than the value specified by max_instances.
         """
         return pulumi.get(self, "min_instances")
 
@@ -133,7 +145,9 @@ class ConnectorArgs:
     @pulumi.getter(name="minThroughput")
     def min_throughput(self) -> Optional[pulumi.Input[int]]:
         """
-        Minimum throughput of the connector in Mbps. Default and min is 200.
+        Minimum throughput of the connector in Mbps. Default and min is 200. Refers to the expected throughput when using an e2-micro machine type.
+        Value must be a multiple of 100 from 200 through 900. Must be lower than the value specified by max_throughput. If both min_throughput and
+        min_instances are provided, min_instances takes precedence over min_throughput. The use of min_throughput is discouraged in favor of min_instances.
         """
         return pulumi.get(self, "min_throughput")
 
@@ -229,10 +243,17 @@ class _ConnectorState:
         :param pulumi.Input[Sequence[pulumi.Input[str]]] connected_projects: List of projects using the connector.
         :param pulumi.Input[str] ip_cidr_range: The range of internal addresses that follows RFC 4632 notation. Example: `10.132.0.0/28`.
         :param pulumi.Input[str] machine_type: Machine type of VM Instance underlying connector. Default is e2-micro
-        :param pulumi.Input[int] max_instances: Maximum value of instances in autoscaling group underlying the connector.
-        :param pulumi.Input[int] max_throughput: Maximum throughput of the connector in Mbps, must be greater than `min_throughput`. Default is 300.
-        :param pulumi.Input[int] min_instances: Minimum value of instances in autoscaling group underlying the connector.
-        :param pulumi.Input[int] min_throughput: Minimum throughput of the connector in Mbps. Default and min is 200.
+        :param pulumi.Input[int] max_instances: Maximum value of instances in autoscaling group underlying the connector. Value must be between 3 and 10, inclusive. Must be
+               higher than the value specified by min_instances.
+        :param pulumi.Input[int] max_throughput: Maximum throughput of the connector in Mbps, must be greater than `min_throughput`. Default is 300. Refers to the expected throughput
+               when using an e2-micro machine type. Value must be a multiple of 100 from 300 through 1000. Must be higher than the value specified by
+               min_throughput. If both max_throughput and max_instances are provided, max_instances takes precedence over max_throughput. The use of
+               max_throughput is discouraged in favor of max_instances.
+        :param pulumi.Input[int] min_instances: Minimum value of instances in autoscaling group underlying the connector. Value must be between 2 and 9, inclusive. Must be
+               lower than the value specified by max_instances.
+        :param pulumi.Input[int] min_throughput: Minimum throughput of the connector in Mbps. Default and min is 200. Refers to the expected throughput when using an e2-micro machine type.
+               Value must be a multiple of 100 from 200 through 900. Must be lower than the value specified by max_throughput. If both min_throughput and
+               min_instances are provided, min_instances takes precedence over min_throughput. The use of min_throughput is discouraged in favor of min_instances.
         :param pulumi.Input[str] name: The name of the resource (Max 25 characters).
 
 
@@ -315,7 +336,8 @@ class _ConnectorState:
     @pulumi.getter(name="maxInstances")
     def max_instances(self) -> Optional[pulumi.Input[int]]:
         """
-        Maximum value of instances in autoscaling group underlying the connector.
+        Maximum value of instances in autoscaling group underlying the connector. Value must be between 3 and 10, inclusive. Must be
+        higher than the value specified by min_instances.
         """
         return pulumi.get(self, "max_instances")
 
@@ -327,7 +349,10 @@ class _ConnectorState:
     @pulumi.getter(name="maxThroughput")
     def max_throughput(self) -> Optional[pulumi.Input[int]]:
         """
-        Maximum throughput of the connector in Mbps, must be greater than `min_throughput`. Default is 300.
+        Maximum throughput of the connector in Mbps, must be greater than `min_throughput`. Default is 300. Refers to the expected throughput
+        when using an e2-micro machine type. Value must be a multiple of 100 from 300 through 1000. Must be higher than the value specified by
+        min_throughput. If both max_throughput and max_instances are provided, max_instances takes precedence over max_throughput. The use of
+        max_throughput is discouraged in favor of max_instances.
         """
         return pulumi.get(self, "max_throughput")
 
@@ -339,7 +364,8 @@ class _ConnectorState:
     @pulumi.getter(name="minInstances")
     def min_instances(self) -> Optional[pulumi.Input[int]]:
         """
-        Minimum value of instances in autoscaling group underlying the connector.
+        Minimum value of instances in autoscaling group underlying the connector. Value must be between 2 and 9, inclusive. Must be
+        lower than the value specified by max_instances.
         """
         return pulumi.get(self, "min_instances")
 
@@ -351,7 +377,9 @@ class _ConnectorState:
     @pulumi.getter(name="minThroughput")
     def min_throughput(self) -> Optional[pulumi.Input[int]]:
         """
-        Minimum throughput of the connector in Mbps. Default and min is 200.
+        Minimum throughput of the connector in Mbps. Default and min is 200. Refers to the expected throughput when using an e2-micro machine type.
+        Value must be a multiple of 100 from 200 through 900. Must be lower than the value specified by max_throughput. If both min_throughput and
+        min_instances are provided, min_instances takes precedence over min_throughput. The use of min_throughput is discouraged in favor of min_instances.
         """
         return pulumi.get(self, "min_throughput")
 
@@ -548,10 +576,17 @@ class Connector(pulumi.CustomResource):
         :param pulumi.ResourceOptions opts: Options for the resource.
         :param pulumi.Input[str] ip_cidr_range: The range of internal addresses that follows RFC 4632 notation. Example: `10.132.0.0/28`.
         :param pulumi.Input[str] machine_type: Machine type of VM Instance underlying connector. Default is e2-micro
-        :param pulumi.Input[int] max_instances: Maximum value of instances in autoscaling group underlying the connector.
-        :param pulumi.Input[int] max_throughput: Maximum throughput of the connector in Mbps, must be greater than `min_throughput`. Default is 300.
-        :param pulumi.Input[int] min_instances: Minimum value of instances in autoscaling group underlying the connector.
-        :param pulumi.Input[int] min_throughput: Minimum throughput of the connector in Mbps. Default and min is 200.
+        :param pulumi.Input[int] max_instances: Maximum value of instances in autoscaling group underlying the connector. Value must be between 3 and 10, inclusive. Must be
+               higher than the value specified by min_instances.
+        :param pulumi.Input[int] max_throughput: Maximum throughput of the connector in Mbps, must be greater than `min_throughput`. Default is 300. Refers to the expected throughput
+               when using an e2-micro machine type. Value must be a multiple of 100 from 300 through 1000. Must be higher than the value specified by
+               min_throughput. If both max_throughput and max_instances are provided, max_instances takes precedence over max_throughput. The use of
+               max_throughput is discouraged in favor of max_instances.
+        :param pulumi.Input[int] min_instances: Minimum value of instances in autoscaling group underlying the connector. Value must be between 2 and 9, inclusive. Must be
+               lower than the value specified by max_instances.
+        :param pulumi.Input[int] min_throughput: Minimum throughput of the connector in Mbps. Default and min is 200. Refers to the expected throughput when using an e2-micro machine type.
+               Value must be a multiple of 100 from 200 through 900. Must be lower than the value specified by max_throughput. If both min_throughput and
+               min_instances are provided, min_instances takes precedence over min_throughput. The use of min_throughput is discouraged in favor of min_instances.
         :param pulumi.Input[str] name: The name of the resource (Max 25 characters).
 
 
@@ -730,10 +765,17 @@ class Connector(pulumi.CustomResource):
         :param pulumi.Input[Sequence[pulumi.Input[str]]] connected_projects: List of projects using the connector.
         :param pulumi.Input[str] ip_cidr_range: The range of internal addresses that follows RFC 4632 notation. Example: `10.132.0.0/28`.
         :param pulumi.Input[str] machine_type: Machine type of VM Instance underlying connector. Default is e2-micro
-        :param pulumi.Input[int] max_instances: Maximum value of instances in autoscaling group underlying the connector.
-        :param pulumi.Input[int] max_throughput: Maximum throughput of the connector in Mbps, must be greater than `min_throughput`. Default is 300.
-        :param pulumi.Input[int] min_instances: Minimum value of instances in autoscaling group underlying the connector.
-        :param pulumi.Input[int] min_throughput: Minimum throughput of the connector in Mbps. Default and min is 200.
+        :param pulumi.Input[int] max_instances: Maximum value of instances in autoscaling group underlying the connector. Value must be between 3 and 10, inclusive. Must be
+               higher than the value specified by min_instances.
+        :param pulumi.Input[int] max_throughput: Maximum throughput of the connector in Mbps, must be greater than `min_throughput`. Default is 300. Refers to the expected throughput
+               when using an e2-micro machine type. Value must be a multiple of 100 from 300 through 1000. Must be higher than the value specified by
+               min_throughput. If both max_throughput and max_instances are provided, max_instances takes precedence over max_throughput. The use of
+               max_throughput is discouraged in favor of max_instances.
+        :param pulumi.Input[int] min_instances: Minimum value of instances in autoscaling group underlying the connector. Value must be between 2 and 9, inclusive. Must be
+               lower than the value specified by max_instances.
+        :param pulumi.Input[int] min_throughput: Minimum throughput of the connector in Mbps. Default and min is 200. Refers to the expected throughput when using an e2-micro machine type.
+               Value must be a multiple of 100 from 200 through 900. Must be lower than the value specified by max_throughput. If both min_throughput and
+               min_instances are provided, min_instances takes precedence over min_throughput. The use of min_throughput is discouraged in favor of min_instances.
         :param pulumi.Input[str] name: The name of the resource (Max 25 characters).
 
 
@@ -795,7 +837,8 @@ class Connector(pulumi.CustomResource):
     @pulumi.getter(name="maxInstances")
    def max_instances(self) -> pulumi.Output[int]:
         """
-        Maximum value of instances in autoscaling group underlying the connector.
+        Maximum value of instances in autoscaling group underlying the connector. Value must be between 3 and 10, inclusive. Must be
+        higher than the value specified by min_instances.
         """
         return pulumi.get(self, "max_instances")
 
@@ -803,7 +846,10 @@ class Connector(pulumi.CustomResource):
     @pulumi.getter(name="maxThroughput")
     def max_throughput(self) -> pulumi.Output[Optional[int]]:
         """
-        Maximum throughput of the connector in Mbps, must be greater than `min_throughput`. Default is 300.
+        Maximum throughput of the connector in Mbps, must be greater than `min_throughput`. Default is 300. Refers to the expected throughput
+        when using an e2-micro machine type. Value must be a multiple of 100 from 300 through 1000. Must be higher than the value specified by
+        min_throughput. If both max_throughput and max_instances are provided, max_instances takes precedence over max_throughput. The use of
+        max_throughput is discouraged in favor of max_instances.
         """
         return pulumi.get(self, "max_throughput")
 
@@ -811,7 +857,8 @@ class Connector(pulumi.CustomResource):
     @pulumi.getter(name="minInstances")
     def min_instances(self) -> pulumi.Output[int]:
         """
-        Minimum value of instances in autoscaling group underlying the connector.
+        Minimum value of instances in autoscaling group underlying the connector. Value must be between 2 and 9, inclusive. Must be
+        lower than the value specified by max_instances.
         """
         return pulumi.get(self, "min_instances")
 
@@ -819,7 +866,9 @@ class Connector(pulumi.CustomResource):
     @pulumi.getter(name="minThroughput")
     def min_throughput(self) -> pulumi.Output[Optional[int]]:
         """
-        Minimum throughput of the connector in Mbps. Default and min is 200.
+        Minimum throughput of the connector in Mbps. Default and min is 200. Refers to the expected throughput when using an e2-micro machine type.
+        Value must be a multiple of 100 from 200 through 900. Must be lower than the value specified by max_throughput. If both min_throughput and
+        min_instances are provided, min_instances takes precedence over min_throughput. The use of min_throughput is discouraged in favor of min_instances.
         """
         return pulumi.get(self, "min_throughput")
 
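
The expanded docstrings above spell out the sizing constraints for a Serverless VPC Access connector. A minimal sketch under those constraints, assuming a default VPC network; names and values are illustrative and not taken from this diff:

import pulumi_gcp as gcp

# Instance-count sizing (preferred over throughput per the updated docs):
# min_instances must be 2-9 and lower than max_instances; max_instances must be
# 3-10 and higher than min_instances. When both instance counts and throughput
# are set, the instance counts take precedence.
connector = gcp.vpcaccess.Connector(
    "serverless-connector",
    region="us-central1",
    network="default",
    ip_cidr_range="10.132.0.0/28",  # /28 range in RFC 4632 notation
    machine_type="e2-micro",
    min_instances=2,
    max_instances=4,
)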
pulumi_gcp-7.18.0.dist-info/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: pulumi_gcp
-Version: 7.17.0a1712402830
+Version: 7.18.0
 Summary: A Pulumi package for creating and managing Google Cloud Platform resources.
 License: Apache-2.0
 Project-URL: Homepage, https://pulumi.io