pulumi-gcp 8.36.0a1750484065__py3-none-any.whl → 8.37.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (117) hide show
  1. pulumi_gcp/__init__.py +59 -0
  2. pulumi_gcp/alloydb/_inputs.py +26 -0
  3. pulumi_gcp/alloydb/outputs.py +34 -1
  4. pulumi_gcp/apihub/__init__.py +2 -0
  5. pulumi_gcp/apihub/_inputs.py +1658 -0
  6. pulumi_gcp/apihub/outputs.py +1374 -0
  7. pulumi_gcp/apihub/plugin.py +1146 -0
  8. pulumi_gcp/apihub/plugin_instance.py +808 -0
  9. pulumi_gcp/bigquery/table.py +16 -12
  10. pulumi_gcp/bigqueryanalyticshub/_inputs.py +56 -6
  11. pulumi_gcp/bigqueryanalyticshub/data_exchange.py +73 -0
  12. pulumi_gcp/bigqueryanalyticshub/listing.py +201 -48
  13. pulumi_gcp/bigqueryanalyticshub/listing_subscription.py +28 -0
  14. pulumi_gcp/bigqueryanalyticshub/outputs.py +50 -4
  15. pulumi_gcp/bigtable/table.py +82 -0
  16. pulumi_gcp/cloudidentity/group_membership.py +47 -0
  17. pulumi_gcp/cloudidentity/outputs.py +11 -0
  18. pulumi_gcp/cloudquota/s_quota_adjuster_settings.py +64 -8
  19. pulumi_gcp/cloudrunv2/_inputs.py +65 -9
  20. pulumi_gcp/cloudrunv2/job.py +44 -0
  21. pulumi_gcp/cloudrunv2/outputs.py +73 -8
  22. pulumi_gcp/cloudrunv2/service.py +0 -2
  23. pulumi_gcp/cloudrunv2/worker_pool.py +18 -20
  24. pulumi_gcp/compute/__init__.py +1 -0
  25. pulumi_gcp/compute/_inputs.py +787 -18
  26. pulumi_gcp/compute/disk.py +35 -28
  27. pulumi_gcp/compute/firewall_policy_rule.py +207 -0
  28. pulumi_gcp/compute/get_instance_group_manager.py +12 -1
  29. pulumi_gcp/compute/get_region_disk.py +12 -1
  30. pulumi_gcp/compute/instance.py +2 -2
  31. pulumi_gcp/compute/instance_group_manager.py +165 -14
  32. pulumi_gcp/compute/instance_template.py +2 -2
  33. pulumi_gcp/compute/interconnect.py +43 -11
  34. pulumi_gcp/compute/network.py +56 -0
  35. pulumi_gcp/compute/network_firewall_policy.py +68 -0
  36. pulumi_gcp/compute/network_firewall_policy_with_rules.py +61 -0
  37. pulumi_gcp/compute/node_template.py +21 -0
  38. pulumi_gcp/compute/outputs.py +620 -12
  39. pulumi_gcp/compute/region_disk.py +114 -0
  40. pulumi_gcp/compute/region_network_firewall_policy.py +90 -0
  41. pulumi_gcp/compute/region_network_firewall_policy_with_rules.py +109 -0
  42. pulumi_gcp/compute/service_attachment.py +76 -8
  43. pulumi_gcp/compute/wire_group.py +751 -0
  44. pulumi_gcp/config/__init__.pyi +2 -0
  45. pulumi_gcp/config/vars.py +4 -0
  46. pulumi_gcp/contactcenterinsights/__init__.py +9 -0
  47. pulumi_gcp/contactcenterinsights/view.py +526 -0
  48. pulumi_gcp/container/_inputs.py +207 -28
  49. pulumi_gcp/container/cluster.py +54 -0
  50. pulumi_gcp/container/get_cluster.py +12 -1
  51. pulumi_gcp/container/get_engine_versions.py +15 -1
  52. pulumi_gcp/container/node_pool.py +14 -0
  53. pulumi_gcp/container/outputs.py +295 -21
  54. pulumi_gcp/dataplex/_inputs.py +431 -6
  55. pulumi_gcp/dataplex/datascan.py +251 -0
  56. pulumi_gcp/dataplex/entry_type.py +2 -2
  57. pulumi_gcp/dataplex/glossary_category.py +8 -8
  58. pulumi_gcp/dataplex/glossary_term.py +8 -8
  59. pulumi_gcp/dataplex/outputs.py +353 -4
  60. pulumi_gcp/dataplex/task.py +16 -16
  61. pulumi_gcp/dataproc/__init__.py +1 -0
  62. pulumi_gcp/dataproc/_inputs.py +486 -0
  63. pulumi_gcp/dataproc/batch.py +10 -10
  64. pulumi_gcp/dataproc/outputs.py +407 -0
  65. pulumi_gcp/dataproc/session_template.py +1084 -0
  66. pulumi_gcp/diagflow/__init__.py +2 -0
  67. pulumi_gcp/diagflow/_inputs.py +479 -0
  68. pulumi_gcp/diagflow/cx_generative_settings.py +625 -0
  69. pulumi_gcp/diagflow/cx_tool.py +2 -2
  70. pulumi_gcp/diagflow/encryption_spec.py +382 -0
  71. pulumi_gcp/diagflow/outputs.py +416 -0
  72. pulumi_gcp/dns/record_set.py +4 -2
  73. pulumi_gcp/firestore/database.py +0 -9
  74. pulumi_gcp/firestore/field.py +6 -6
  75. pulumi_gcp/gkehub/membership_binding.py +6 -6
  76. pulumi_gcp/gkehub/membership_rbac_role_binding.py +4 -4
  77. pulumi_gcp/gkehub/namespace.py +4 -4
  78. pulumi_gcp/gkehub/scope_rbac_role_binding.py +8 -8
  79. pulumi_gcp/iam/_inputs.py +36 -0
  80. pulumi_gcp/iam/outputs.py +38 -0
  81. pulumi_gcp/iam/workload_identity_pool_managed_identity.py +88 -2
  82. pulumi_gcp/iap/tunnel_dest_group.py +2 -2
  83. pulumi_gcp/integrationconnectors/managed_zone.py +8 -8
  84. pulumi_gcp/kms/autokey_config.py +28 -0
  85. pulumi_gcp/kms/get_autokey_config.py +12 -1
  86. pulumi_gcp/lustre/get_instance.py +12 -1
  87. pulumi_gcp/lustre/instance.py +143 -86
  88. pulumi_gcp/managedkafka/connect_cluster.py +4 -4
  89. pulumi_gcp/managedkafka/connector.py +4 -4
  90. pulumi_gcp/memorystore/get_instance.py +12 -1
  91. pulumi_gcp/memorystore/instance.py +49 -0
  92. pulumi_gcp/monitoring/metric_descriptor.py +55 -57
  93. pulumi_gcp/networkconnectivity/spoke.py +14 -14
  94. pulumi_gcp/notebooks/instance.py +8 -8
  95. pulumi_gcp/orgpolicy/policy.py +2 -2
  96. pulumi_gcp/provider.py +20 -0
  97. pulumi_gcp/pubsub/subscription.py +6 -6
  98. pulumi_gcp/pulumi-plugin.json +1 -1
  99. pulumi_gcp/redis/get_instance.py +12 -1
  100. pulumi_gcp/redis/instance.py +44 -0
  101. pulumi_gcp/secretmanager/get_regional_secret.py +12 -1
  102. pulumi_gcp/secretmanager/outputs.py +7 -0
  103. pulumi_gcp/secretmanager/regional_secret.py +38 -2
  104. pulumi_gcp/spanner/_inputs.py +24 -1
  105. pulumi_gcp/spanner/outputs.py +17 -1
  106. pulumi_gcp/storage/_inputs.py +43 -3
  107. pulumi_gcp/storage/bucket_object.py +56 -0
  108. pulumi_gcp/storage/get_bucket_object.py +12 -1
  109. pulumi_gcp/storage/get_bucket_object_content.py +12 -1
  110. pulumi_gcp/storage/outputs.py +53 -3
  111. pulumi_gcp/vertex/ai_endpoint.py +4 -4
  112. pulumi_gcp/vertex/ai_feature_online_store_featureview.py +4 -4
  113. pulumi_gcp/workbench/instance.py +2 -0
  114. {pulumi_gcp-8.36.0a1750484065.dist-info → pulumi_gcp-8.37.0.dist-info}/METADATA +2 -2
  115. {pulumi_gcp-8.36.0a1750484065.dist-info → pulumi_gcp-8.37.0.dist-info}/RECORD +117 -109
  116. {pulumi_gcp-8.36.0a1750484065.dist-info → pulumi_gcp-8.37.0.dist-info}/WHEEL +0 -0
  117. {pulumi_gcp-8.36.0a1750484065.dist-info → pulumi_gcp-8.37.0.dist-info}/top_level.txt +0 -0
@@ -30,6 +30,11 @@ __all__ = [
30
30
  'AssetResourceStatus',
31
31
  'AssetSecurityStatus',
32
32
  'DatascanData',
33
+ 'DatascanDataDiscoverySpec',
34
+ 'DatascanDataDiscoverySpecBigqueryPublishingConfig',
35
+ 'DatascanDataDiscoverySpecStorageConfig',
36
+ 'DatascanDataDiscoverySpecStorageConfigCsvOptions',
37
+ 'DatascanDataDiscoverySpecStorageConfigJsonOptions',
33
38
  'DatascanDataProfileSpec',
34
39
  'DatascanDataProfileSpecExcludeFields',
35
40
  'DatascanDataProfileSpecIncludeFields',
@@ -807,7 +812,7 @@ class DatascanData(dict):
807
812
  """
808
813
  :param builtins.str entity: The Dataplex entity that represents the data source(e.g. BigQuery table) for Datascan.
809
814
  :param builtins.str resource: The service-qualified full resource name of the cloud resource for a DataScan job to scan against. The field could be:
810
- (Cloud Storage bucket for DataDiscoveryScan)BigQuery table of type "TABLE" for DataProfileScan/DataQualityScan.
815
+ Cloud Storage bucket (//storage.googleapis.com/projects/PROJECT_ID/buckets/BUCKET_ID) for DataDiscoveryScan OR BigQuery table of type "TABLE" (//bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID) for DataProfileScan/DataQualityScan.
811
816
  """
812
817
  if entity is not None:
813
818
  pulumi.set(__self__, "entity", entity)
@@ -827,11 +832,355 @@ class DatascanData(dict):
827
832
  def resource(self) -> Optional[builtins.str]:
828
833
  """
829
834
  The service-qualified full resource name of the cloud resource for a DataScan job to scan against. The field could be:
830
- (Cloud Storage bucket for DataDiscoveryScan)BigQuery table of type "TABLE" for DataProfileScan/DataQualityScan.
835
+ Cloud Storage bucket (//storage.googleapis.com/projects/PROJECT_ID/buckets/BUCKET_ID) for DataDiscoveryScan OR BigQuery table of type "TABLE" (//bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID) for DataProfileScan/DataQualityScan.
831
836
  """
832
837
  return pulumi.get(self, "resource")
833
838
 
834
839
 
840
+ @pulumi.output_type
841
+ class DatascanDataDiscoverySpec(dict):
842
+ @staticmethod
843
+ def __key_warning(key: str):
844
+ suggest = None
845
+ if key == "bigqueryPublishingConfig":
846
+ suggest = "bigquery_publishing_config"
847
+ elif key == "storageConfig":
848
+ suggest = "storage_config"
849
+
850
+ if suggest:
851
+ pulumi.log.warn(f"Key '{key}' not found in DatascanDataDiscoverySpec. Access the value via the '{suggest}' property getter instead.")
852
+
853
+ def __getitem__(self, key: str) -> Any:
854
+ DatascanDataDiscoverySpec.__key_warning(key)
855
+ return super().__getitem__(key)
856
+
857
+ def get(self, key: str, default = None) -> Any:
858
+ DatascanDataDiscoverySpec.__key_warning(key)
859
+ return super().get(key, default)
860
+
861
+ def __init__(__self__, *,
862
+ bigquery_publishing_config: Optional['outputs.DatascanDataDiscoverySpecBigqueryPublishingConfig'] = None,
863
+ storage_config: Optional['outputs.DatascanDataDiscoverySpecStorageConfig'] = None):
864
+ """
865
+ :param 'DatascanDataDiscoverySpecBigqueryPublishingConfigArgs' bigquery_publishing_config: Configuration for metadata publishing.
866
+ Structure is documented below.
867
+ :param 'DatascanDataDiscoverySpecStorageConfigArgs' storage_config: Configurations related to Cloud Storage as the data source.
868
+ Structure is documented below.
869
+ """
870
+ if bigquery_publishing_config is not None:
871
+ pulumi.set(__self__, "bigquery_publishing_config", bigquery_publishing_config)
872
+ if storage_config is not None:
873
+ pulumi.set(__self__, "storage_config", storage_config)
874
+
875
+ @property
876
+ @pulumi.getter(name="bigqueryPublishingConfig")
877
+ def bigquery_publishing_config(self) -> Optional['outputs.DatascanDataDiscoverySpecBigqueryPublishingConfig']:
878
+ """
879
+ Configuration for metadata publishing.
880
+ Structure is documented below.
881
+ """
882
+ return pulumi.get(self, "bigquery_publishing_config")
883
+
884
+ @property
885
+ @pulumi.getter(name="storageConfig")
886
+ def storage_config(self) -> Optional['outputs.DatascanDataDiscoverySpecStorageConfig']:
887
+ """
888
+ Configurations related to Cloud Storage as the data source.
889
+ Structure is documented below.
890
+ """
891
+ return pulumi.get(self, "storage_config")
892
+
893
+
894
+ @pulumi.output_type
895
+ class DatascanDataDiscoverySpecBigqueryPublishingConfig(dict):
896
+ @staticmethod
897
+ def __key_warning(key: str):
898
+ suggest = None
899
+ if key == "tableType":
900
+ suggest = "table_type"
901
+
902
+ if suggest:
903
+ pulumi.log.warn(f"Key '{key}' not found in DatascanDataDiscoverySpecBigqueryPublishingConfig. Access the value via the '{suggest}' property getter instead.")
904
+
905
+ def __getitem__(self, key: str) -> Any:
906
+ DatascanDataDiscoverySpecBigqueryPublishingConfig.__key_warning(key)
907
+ return super().__getitem__(key)
908
+
909
+ def get(self, key: str, default = None) -> Any:
910
+ DatascanDataDiscoverySpecBigqueryPublishingConfig.__key_warning(key)
911
+ return super().get(key, default)
912
+
913
+ def __init__(__self__, *,
914
+ connection: Optional[builtins.str] = None,
915
+ location: Optional[builtins.str] = None,
916
+ project: Optional[builtins.str] = None,
917
+ table_type: Optional[builtins.str] = None):
918
+ """
919
+ :param builtins.str connection: The BigQuery connection used to create BigLake tables. Must be in the form `projects/{projectId}/locations/{locationId}/connections/{connection_id}`.
920
+ :param builtins.str location: The location of the BigQuery dataset to publish BigLake external or non-BigLake external tables to.
921
+ :param builtins.str project: The project of the BigQuery dataset to publish BigLake external or non-BigLake external tables to. If not specified, the project of the Cloud Storage bucket will be used. The format is "projects/{project_id_or_number}".
922
+ :param builtins.str table_type: Determines whether to publish discovered tables as BigLake external tables or non-BigLake external tables.
923
+ Possible values are: `TABLE_TYPE_UNSPECIFIED`, `EXTERNAL`, `BIGLAKE`.
924
+ """
925
+ if connection is not None:
926
+ pulumi.set(__self__, "connection", connection)
927
+ if location is not None:
928
+ pulumi.set(__self__, "location", location)
929
+ if project is not None:
930
+ pulumi.set(__self__, "project", project)
931
+ if table_type is not None:
932
+ pulumi.set(__self__, "table_type", table_type)
933
+
934
+ @property
935
+ @pulumi.getter
936
+ def connection(self) -> Optional[builtins.str]:
937
+ """
938
+ The BigQuery connection used to create BigLake tables. Must be in the form `projects/{projectId}/locations/{locationId}/connections/{connection_id}`.
939
+ """
940
+ return pulumi.get(self, "connection")
941
+
942
+ @property
943
+ @pulumi.getter
944
+ def location(self) -> Optional[builtins.str]:
945
+ """
946
+ The location of the BigQuery dataset to publish BigLake external or non-BigLake external tables to.
947
+ """
948
+ return pulumi.get(self, "location")
949
+
950
+ @property
951
+ @pulumi.getter
952
+ def project(self) -> Optional[builtins.str]:
953
+ """
954
+ The project of the BigQuery dataset to publish BigLake external or non-BigLake external tables to. If not specified, the project of the Cloud Storage bucket will be used. The format is "projects/{project_id_or_number}".
955
+ """
956
+ return pulumi.get(self, "project")
957
+
958
+ @property
959
+ @pulumi.getter(name="tableType")
960
+ def table_type(self) -> Optional[builtins.str]:
961
+ """
962
+ Determines whether to publish discovered tables as BigLake external tables or non-BigLake external tables.
963
+ Possible values are: `TABLE_TYPE_UNSPECIFIED`, `EXTERNAL`, `BIGLAKE`.
964
+ """
965
+ return pulumi.get(self, "table_type")
966
+
967
+
968
+ @pulumi.output_type
969
+ class DatascanDataDiscoverySpecStorageConfig(dict):
970
+ @staticmethod
971
+ def __key_warning(key: str):
972
+ suggest = None
973
+ if key == "csvOptions":
974
+ suggest = "csv_options"
975
+ elif key == "excludePatterns":
976
+ suggest = "exclude_patterns"
977
+ elif key == "includePatterns":
978
+ suggest = "include_patterns"
979
+ elif key == "jsonOptions":
980
+ suggest = "json_options"
981
+
982
+ if suggest:
983
+ pulumi.log.warn(f"Key '{key}' not found in DatascanDataDiscoverySpecStorageConfig. Access the value via the '{suggest}' property getter instead.")
984
+
985
+ def __getitem__(self, key: str) -> Any:
986
+ DatascanDataDiscoverySpecStorageConfig.__key_warning(key)
987
+ return super().__getitem__(key)
988
+
989
+ def get(self, key: str, default = None) -> Any:
990
+ DatascanDataDiscoverySpecStorageConfig.__key_warning(key)
991
+ return super().get(key, default)
992
+
993
+ def __init__(__self__, *,
994
+ csv_options: Optional['outputs.DatascanDataDiscoverySpecStorageConfigCsvOptions'] = None,
995
+ exclude_patterns: Optional[Sequence[builtins.str]] = None,
996
+ include_patterns: Optional[Sequence[builtins.str]] = None,
997
+ json_options: Optional['outputs.DatascanDataDiscoverySpecStorageConfigJsonOptions'] = None):
998
+ """
999
+ :param 'DatascanDataDiscoverySpecStorageConfigCsvOptionsArgs' csv_options: Configuration for CSV data.
1000
+ Structure is documented below.
1001
+ :param Sequence[builtins.str] exclude_patterns: Defines the data to exclude during discovery. Provide a list of patterns that identify the data to exclude. For Cloud Storage bucket assets, these patterns are interpreted as glob patterns used to match object names. For BigQuery dataset assets, these patterns are interpreted as patterns to match table names.
1002
+ :param Sequence[builtins.str] include_patterns: Defines the data to include during discovery when only a subset of the data should be considered. Provide a list of patterns that identify the data to include. For Cloud Storage bucket assets, these patterns are interpreted as glob patterns used to match object names. For BigQuery dataset assets, these patterns are interpreted as patterns to match table names.
1003
+ :param 'DatascanDataDiscoverySpecStorageConfigJsonOptionsArgs' json_options: Configuration for JSON data.
1004
+ Structure is documented below.
1005
+ """
1006
+ if csv_options is not None:
1007
+ pulumi.set(__self__, "csv_options", csv_options)
1008
+ if exclude_patterns is not None:
1009
+ pulumi.set(__self__, "exclude_patterns", exclude_patterns)
1010
+ if include_patterns is not None:
1011
+ pulumi.set(__self__, "include_patterns", include_patterns)
1012
+ if json_options is not None:
1013
+ pulumi.set(__self__, "json_options", json_options)
1014
+
1015
+ @property
1016
+ @pulumi.getter(name="csvOptions")
1017
+ def csv_options(self) -> Optional['outputs.DatascanDataDiscoverySpecStorageConfigCsvOptions']:
1018
+ """
1019
+ Configuration for CSV data.
1020
+ Structure is documented below.
1021
+ """
1022
+ return pulumi.get(self, "csv_options")
1023
+
1024
+ @property
1025
+ @pulumi.getter(name="excludePatterns")
1026
+ def exclude_patterns(self) -> Optional[Sequence[builtins.str]]:
1027
+ """
1028
+ Defines the data to exclude during discovery. Provide a list of patterns that identify the data to exclude. For Cloud Storage bucket assets, these patterns are interpreted as glob patterns used to match object names. For BigQuery dataset assets, these patterns are interpreted as patterns to match table names.
1029
+ """
1030
+ return pulumi.get(self, "exclude_patterns")
1031
+
1032
+ @property
1033
+ @pulumi.getter(name="includePatterns")
1034
+ def include_patterns(self) -> Optional[Sequence[builtins.str]]:
1035
+ """
1036
+ Defines the data to include during discovery when only a subset of the data should be considered. Provide a list of patterns that identify the data to include. For Cloud Storage bucket assets, these patterns are interpreted as glob patterns used to match object names. For BigQuery dataset assets, these patterns are interpreted as patterns to match table names.
1037
+ """
1038
+ return pulumi.get(self, "include_patterns")
1039
+
1040
+ @property
1041
+ @pulumi.getter(name="jsonOptions")
1042
+ def json_options(self) -> Optional['outputs.DatascanDataDiscoverySpecStorageConfigJsonOptions']:
1043
+ """
1044
+ Configuration for JSON data.
1045
+ Structure is documented below.
1046
+ """
1047
+ return pulumi.get(self, "json_options")
1048
+
1049
+
1050
+ @pulumi.output_type
1051
+ class DatascanDataDiscoverySpecStorageConfigCsvOptions(dict):
1052
+ @staticmethod
1053
+ def __key_warning(key: str):
1054
+ suggest = None
1055
+ if key == "headerRows":
1056
+ suggest = "header_rows"
1057
+ elif key == "typeInferenceDisabled":
1058
+ suggest = "type_inference_disabled"
1059
+
1060
+ if suggest:
1061
+ pulumi.log.warn(f"Key '{key}' not found in DatascanDataDiscoverySpecStorageConfigCsvOptions. Access the value via the '{suggest}' property getter instead.")
1062
+
1063
+ def __getitem__(self, key: str) -> Any:
1064
+ DatascanDataDiscoverySpecStorageConfigCsvOptions.__key_warning(key)
1065
+ return super().__getitem__(key)
1066
+
1067
+ def get(self, key: str, default = None) -> Any:
1068
+ DatascanDataDiscoverySpecStorageConfigCsvOptions.__key_warning(key)
1069
+ return super().get(key, default)
1070
+
1071
+ def __init__(__self__, *,
1072
+ delimiter: Optional[builtins.str] = None,
1073
+ encoding: Optional[builtins.str] = None,
1074
+ header_rows: Optional[builtins.int] = None,
1075
+ quote: Optional[builtins.str] = None,
1076
+ type_inference_disabled: Optional[builtins.bool] = None):
1077
+ """
1078
+ :param builtins.str delimiter: The delimiter that is used to separate values. The default is `,` (comma).
1079
+ :param builtins.str encoding: The character encoding of the data. The default is UTF-8.
1080
+ :param builtins.int header_rows: The number of rows to interpret as header rows that should be skipped when reading data rows.
1081
+ :param builtins.str quote: The character used to quote column values. Accepts `"` (double quotation mark) or `'` (single quotation mark). If unspecified, defaults to `"` (double quotation mark).
1082
+ :param builtins.bool type_inference_disabled: Whether to disable the inference of data types for CSV data. If true, all columns are registered as strings.
1083
+ """
1084
+ if delimiter is not None:
1085
+ pulumi.set(__self__, "delimiter", delimiter)
1086
+ if encoding is not None:
1087
+ pulumi.set(__self__, "encoding", encoding)
1088
+ if header_rows is not None:
1089
+ pulumi.set(__self__, "header_rows", header_rows)
1090
+ if quote is not None:
1091
+ pulumi.set(__self__, "quote", quote)
1092
+ if type_inference_disabled is not None:
1093
+ pulumi.set(__self__, "type_inference_disabled", type_inference_disabled)
1094
+
1095
+ @property
1096
+ @pulumi.getter
1097
+ def delimiter(self) -> Optional[builtins.str]:
1098
+ """
1099
+ The delimiter that is used to separate values. The default is `,` (comma).
1100
+ """
1101
+ return pulumi.get(self, "delimiter")
1102
+
1103
+ @property
1104
+ @pulumi.getter
1105
+ def encoding(self) -> Optional[builtins.str]:
1106
+ """
1107
+ The character encoding of the data. The default is UTF-8.
1108
+ """
1109
+ return pulumi.get(self, "encoding")
1110
+
1111
+ @property
1112
+ @pulumi.getter(name="headerRows")
1113
+ def header_rows(self) -> Optional[builtins.int]:
1114
+ """
1115
+ The number of rows to interpret as header rows that should be skipped when reading data rows.
1116
+ """
1117
+ return pulumi.get(self, "header_rows")
1118
+
1119
+ @property
1120
+ @pulumi.getter
1121
+ def quote(self) -> Optional[builtins.str]:
1122
+ """
1123
+ The character used to quote column values. Accepts `"` (double quotation mark) or `'` (single quotation mark). If unspecified, defaults to `"` (double quotation mark).
1124
+ """
1125
+ return pulumi.get(self, "quote")
1126
+
1127
+ @property
1128
+ @pulumi.getter(name="typeInferenceDisabled")
1129
+ def type_inference_disabled(self) -> Optional[builtins.bool]:
1130
+ """
1131
+ Whether to disable the inference of data types for CSV data. If true, all columns are registered as strings.
1132
+ """
1133
+ return pulumi.get(self, "type_inference_disabled")
1134
+
1135
+
1136
+ @pulumi.output_type
1137
+ class DatascanDataDiscoverySpecStorageConfigJsonOptions(dict):
1138
+ @staticmethod
1139
+ def __key_warning(key: str):
1140
+ suggest = None
1141
+ if key == "typeInferenceDisabled":
1142
+ suggest = "type_inference_disabled"
1143
+
1144
+ if suggest:
1145
+ pulumi.log.warn(f"Key '{key}' not found in DatascanDataDiscoverySpecStorageConfigJsonOptions. Access the value via the '{suggest}' property getter instead.")
1146
+
1147
+ def __getitem__(self, key: str) -> Any:
1148
+ DatascanDataDiscoverySpecStorageConfigJsonOptions.__key_warning(key)
1149
+ return super().__getitem__(key)
1150
+
1151
+ def get(self, key: str, default = None) -> Any:
1152
+ DatascanDataDiscoverySpecStorageConfigJsonOptions.__key_warning(key)
1153
+ return super().get(key, default)
1154
+
1155
+ def __init__(__self__, *,
1156
+ encoding: Optional[builtins.str] = None,
1157
+ type_inference_disabled: Optional[builtins.bool] = None):
1158
+ """
1159
+ :param builtins.str encoding: The character encoding of the data. The default is UTF-8.
1160
+ :param builtins.bool type_inference_disabled: Whether to disable the inference of data types for JSON data. If true, all columns are registered as their primitive types (strings, number, or boolean).
1161
+ """
1162
+ if encoding is not None:
1163
+ pulumi.set(__self__, "encoding", encoding)
1164
+ if type_inference_disabled is not None:
1165
+ pulumi.set(__self__, "type_inference_disabled", type_inference_disabled)
1166
+
1167
+ @property
1168
+ @pulumi.getter
1169
+ def encoding(self) -> Optional[builtins.str]:
1170
+ """
1171
+ The character encoding of the data. The default is UTF-8.
1172
+ """
1173
+ return pulumi.get(self, "encoding")
1174
+
1175
+ @property
1176
+ @pulumi.getter(name="typeInferenceDisabled")
1177
+ def type_inference_disabled(self) -> Optional[builtins.bool]:
1178
+ """
1179
+ Whether to disable the inference of data types for JSON data. If true, all columns are registered as their primitive types (strings, number, or boolean).
1180
+ """
1181
+ return pulumi.get(self, "type_inference_disabled")
1182
+
1183
+
835
1184
  @pulumi.output_type
836
1185
  class DatascanDataProfileSpec(dict):
837
1186
  @staticmethod
@@ -1484,7 +1833,7 @@ class DatascanDataQualitySpecRule(dict):
1484
1833
  threshold: Optional[builtins.float] = None,
1485
1834
  uniqueness_expectation: Optional['outputs.DatascanDataQualitySpecRuleUniquenessExpectation'] = None):
1486
1835
  """
1487
- :param builtins.str dimension: The dimension a rule belongs to. Results are also aggregated at the dimension level. Supported dimensions are ["COMPLETENESS", "ACCURACY", "CONSISTENCY", "VALIDITY", "UNIQUENESS", "INTEGRITY"]
1836
+ :param builtins.str dimension: The dimension name a rule belongs to. Custom dimension name is supported with all uppercase letters and maximum length of 30 characters.
1488
1837
  :param builtins.str column: The unnested column which this rule is evaluated against.
1489
1838
  :param builtins.str description: Description of the rule.
1490
1839
  The maximum length is 1,024 characters.
@@ -1546,7 +1895,7 @@ class DatascanDataQualitySpecRule(dict):
1546
1895
  @pulumi.getter
1547
1896
  def dimension(self) -> builtins.str:
1548
1897
  """
1549
- The dimension a rule belongs to. Results are also aggregated at the dimension level. Supported dimensions are ["COMPLETENESS", "ACCURACY", "CONSISTENCY", "VALIDITY", "UNIQUENESS", "INTEGRITY"]
1898
+ The dimension name a rule belongs to. Custom dimension name is supported with all uppercase letters and maximum length of 30 characters.
1550
1899
  """
1551
1900
  return pulumi.get(self, "dimension")
1552
1901
 
@@ -574,11 +574,11 @@ class Task(pulumi.CustomResource):
574
574
 
575
575
  project = gcp.organizations.get_project()
576
576
  example = gcp.dataplex.Lake("example",
577
- name="tf-test-lake_88722",
577
+ name="tf-test-lake_8493",
578
578
  location="us-central1",
579
579
  project="my-project-name")
580
580
  example_task = gcp.dataplex.Task("example",
581
- task_id="tf-test-task_39249",
581
+ task_id="tf-test-task_9106",
582
582
  location="us-central1",
583
583
  lake=example.name,
584
584
  description="Test Task Basic",
@@ -612,15 +612,15 @@ class Task(pulumi.CustomResource):
612
612
 
613
613
  # VPC network
614
614
  default = gcp.compute.Network("default",
615
- name="tf-test-workstation-cluster_74391",
615
+ name="tf-test-workstation-cluster_27169",
616
616
  auto_create_subnetworks=True)
617
617
  project = gcp.organizations.get_project()
618
618
  example_spark = gcp.dataplex.Lake("example_spark",
619
- name="tf-test-lake_16511",
619
+ name="tf-test-lake_75223",
620
620
  location="us-central1",
621
621
  project="my-project-name")
622
622
  example_spark_task = gcp.dataplex.Task("example_spark",
623
- task_id="tf-test-task_8493",
623
+ task_id="tf-test-task_41819",
624
624
  location="us-central1",
625
625
  lake=example_spark.name,
626
626
  trigger_spec={
@@ -668,15 +668,15 @@ class Task(pulumi.CustomResource):
668
668
 
669
669
  # VPC network
670
670
  default = gcp.compute.Network("default",
671
- name="tf-test-workstation-cluster_9106",
671
+ name="tf-test-workstation-cluster_75092",
672
672
  auto_create_subnetworks=True)
673
673
  project = gcp.organizations.get_project()
674
674
  example_notebook = gcp.dataplex.Lake("example_notebook",
675
- name="tf-test-lake_27169",
675
+ name="tf-test-lake_2605",
676
676
  location="us-central1",
677
677
  project="my-project-name")
678
678
  example_notebook_task = gcp.dataplex.Task("example_notebook",
679
- task_id="tf-test-task_75223",
679
+ task_id="tf-test-task_34535",
680
680
  location="us-central1",
681
681
  lake=example_notebook.name,
682
682
  trigger_spec={
@@ -784,11 +784,11 @@ class Task(pulumi.CustomResource):
784
784
 
785
785
  project = gcp.organizations.get_project()
786
786
  example = gcp.dataplex.Lake("example",
787
- name="tf-test-lake_88722",
787
+ name="tf-test-lake_8493",
788
788
  location="us-central1",
789
789
  project="my-project-name")
790
790
  example_task = gcp.dataplex.Task("example",
791
- task_id="tf-test-task_39249",
791
+ task_id="tf-test-task_9106",
792
792
  location="us-central1",
793
793
  lake=example.name,
794
794
  description="Test Task Basic",
@@ -822,15 +822,15 @@ class Task(pulumi.CustomResource):
822
822
 
823
823
  # VPC network
824
824
  default = gcp.compute.Network("default",
825
- name="tf-test-workstation-cluster_74391",
825
+ name="tf-test-workstation-cluster_27169",
826
826
  auto_create_subnetworks=True)
827
827
  project = gcp.organizations.get_project()
828
828
  example_spark = gcp.dataplex.Lake("example_spark",
829
- name="tf-test-lake_16511",
829
+ name="tf-test-lake_75223",
830
830
  location="us-central1",
831
831
  project="my-project-name")
832
832
  example_spark_task = gcp.dataplex.Task("example_spark",
833
- task_id="tf-test-task_8493",
833
+ task_id="tf-test-task_41819",
834
834
  location="us-central1",
835
835
  lake=example_spark.name,
836
836
  trigger_spec={
@@ -878,15 +878,15 @@ class Task(pulumi.CustomResource):
878
878
 
879
879
  # VPC network
880
880
  default = gcp.compute.Network("default",
881
- name="tf-test-workstation-cluster_9106",
881
+ name="tf-test-workstation-cluster_75092",
882
882
  auto_create_subnetworks=True)
883
883
  project = gcp.organizations.get_project()
884
884
  example_notebook = gcp.dataplex.Lake("example_notebook",
885
- name="tf-test-lake_27169",
885
+ name="tf-test-lake_2605",
886
886
  location="us-central1",
887
887
  project="my-project-name")
888
888
  example_notebook_task = gcp.dataplex.Task("example_notebook",
889
- task_id="tf-test-task_75223",
889
+ task_id="tf-test-task_34535",
890
890
  location="us-central1",
891
891
  lake=example_notebook.name,
892
892
  trigger_spec={
@@ -44,6 +44,7 @@ from .metastore_service_iam_policy import *
44
44
  from .metastore_table_iam_binding import *
45
45
  from .metastore_table_iam_member import *
46
46
  from .metastore_table_iam_policy import *
47
+ from .session_template import *
47
48
  from .workflow_template import *
48
49
  from ._inputs import *
49
50
  from . import outputs