@maxim_mazurok/gapi.client.bigquery-v2 0.0.20250216 → 0.0.20250313
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/index.d.ts +13 -1
- package/package.json +1 -1
package/index.d.ts
CHANGED
@@ -9,7 +9,7 @@
 // This file was generated by https://github.com/Maxim-Mazurok/google-api-typings-generator. Please do not edit it manually.
 // In case of any problems please post issue to https://github.com/Maxim-Mazurok/google-api-typings-generator
 // Generated from: https://bigquery.googleapis.com/$discovery/rest?version=v2
-// Revision: 20250216
+// Revision: 20250313
 
 /// <reference types="gapi.client" />
 
@@ -369,12 +369,16 @@ declare namespace gapi.client {
 fieldDelimiter?: string;
 /** Optional. Specifies a string that represents a null value in a CSV file. For example, if you specify "\N", BigQuery interprets "\N" as a null value when querying a CSV file. The default value is the empty string. If you set this property to a custom value, BigQuery throws an error if an empty string is present for all data types except for STRING and BYTE. For STRING and BYTE columns, BigQuery interprets the empty string as an empty value. */
 nullMarker?: string;
+/** Optional. A list of strings represented as SQL NULL value in a CSV file. null_marker and null_markers can't be set at the same time. If null_marker is set, null_markers has to be not set. If null_markers is set, null_marker has to be not set. If both null_marker and null_markers are set at the same time, a user error would be thrown. Any strings listed in null_markers, including empty string would be interpreted as SQL NULL. This applies to all column types. */
+nullMarkers?: string[];
 /** Optional. Indicates if the embedded ASCII control characters (the first 32 characters in the ASCII-table, from '\x00' to '\x1F') are preserved. */
 preserveAsciiControlCharacters?: boolean;
 /** Optional. The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ("). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true. To include the specific quote character within a quoted value, precede it with an additional matching quote character. For example, if you want to escape the default character ' " ', use ' "" '. */
 quote?: string;
 /** Optional. The number of rows at the top of a CSV file that BigQuery will skip when reading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped. When autodetect is on, the behavior is the following: * skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected, the row is read as data. Otherwise data is read starting from the second row. * skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row. * skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected, row N is just skipped. Otherwise row N is used to extract column names for the detected schema. */
 skipLeadingRows?: string;
+/** Optional. Controls the strategy used to match loaded columns to the schema. If not set, a sensible default is chosen based on how the schema is provided. If autodetect is used, then columns are matched by name. Otherwise, columns are matched by position. This is done to keep the behavior backward-compatible. Acceptable values are: POSITION - matches by position. This assumes that the columns are ordered the same way as the schema. NAME - matches by name. This reads the header row as column names and reorders columns to match the field names in the schema. */
+sourceColumnMatch?: string;
 }
 interface DataFormatOptions {
 /** Optional. Output timestamp as usec int64. Default is false. */
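The hunk above adds two optional fields to `CsvOptions`: `nullMarkers`, a list of strings each read as SQL NULL (mutually exclusive with `nullMarker`), and `sourceColumnMatch`, which accepts `POSITION` or `NAME`. A minimal usage sketch, assuming the interface sits under the `gapi.client.bigquery` namespace as in the rest of these typings; the option values are illustrative:

```typescript
// Sketch: CSV read options exercising both fields added in this release.
const csvOptions: gapi.client.bigquery.CsvOptions = {
  fieldDelimiter: ',',
  skipLeadingRows: '1', // string-typed on CsvOptions
  // New: any of these strings (including '') is interpreted as SQL NULL.
  // Mutually exclusive with the single-valued nullMarker.
  nullMarkers: ['\\N', 'NULL'],
  // New: match columns to the schema by header name rather than position.
  sourceColumnMatch: 'NAME',
};
```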
@@ -1052,6 +1056,8 @@ declare namespace gapi.client {
 load?: JobConfigurationLoad;
 /** [Pick one] Configures a query job. */
 query?: JobConfigurationQuery;
+/** Optional. The reservation that job would use. User can specify a reservation to execute the job. If reservation is not set, reservation is determined based on the rules defined by the reservation assignments. The expected format is `projects/{project}/locations/{location}/reservations/{reservation}`. */
+reservation?: string;
 }
 interface JobConfigurationExtract {
 /** Optional. The compression type to use for exported files. Possible values include DEFLATE, GZIP, NONE, SNAPPY, and ZSTD. The default value is NONE. Not all compression formats are support for all file formats. DEFLATE is only supported for Avro. ZSTD is only supported for Parquet. Not applicable when extracting models. */
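The new `JobConfiguration.reservation` field lets a job name its reservation explicitly instead of relying on reservation-assignment rules. A sketch under those assumptions; project, location, and reservation names are placeholders:

```typescript
// Sketch: pinning a query job to an explicit reservation.
const jobConfig: gapi.client.bigquery.JobConfiguration = {
  query: { query: 'SELECT 1', useLegacySql: false },
  // New: overrides reservation-assignment rules for this job only.
  reservation: 'projects/my-project/locations/us/reservations/my-reservation',
};
```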
@@ -1122,6 +1128,8 @@ declare namespace gapi.client {
 maxBadRecords?: number;
 /** Optional. Specifies a string that represents a null value in a CSV file. For example, if you specify "\N", BigQuery interprets "\N" as a null value when loading a CSV file. The default value is the empty string. If you set this property to a custom value, BigQuery throws an error if an empty string is present for all data types except for STRING and BYTE. For STRING and BYTE columns, BigQuery interprets the empty string as an empty value. */
 nullMarker?: string;
+/** Optional. A list of strings represented as SQL NULL value in a CSV file. null_marker and null_markers can't be set at the same time. If null_marker is set, null_markers has to be not set. If null_markers is set, null_marker has to be not set. If both null_marker and null_markers are set at the same time, a user error would be thrown. Any strings listed in null_markers, including empty string would be interpreted as SQL NULL. This applies to all column types. */
+nullMarkers?: string[];
 /** Optional. Additional properties to set if sourceFormat is set to PARQUET. */
 parquetOptions?: ParquetOptions;
 /** Optional. When sourceFormat is set to "CSV", this indicates whether the embedded ASCII control characters (the first 32 characters in the ASCII-table, from '\x00' to '\x1F') are preserved. */
@@ -1144,6 +1152,8 @@ declare namespace gapi.client {
 schemaUpdateOptions?: string[];
 /** Optional. The number of rows at the top of a CSV file that BigQuery will skip when loading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped. When autodetect is on, the behavior is the following: * skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected, the row is read as data. Otherwise data is read starting from the second row. * skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row. * skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected, row N is just skipped. Otherwise row N is used to extract column names for the detected schema. */
 skipLeadingRows?: number;
+/** Optional. Controls the strategy used to match loaded columns to the schema. If not set, a sensible default is chosen based on how the schema is provided. If autodetect is used, then columns are matched by name. Otherwise, columns are matched by position. This is done to keep the behavior backward-compatible. */
+sourceColumnMatch?: string;
 /** Optional. The format of the data files. For CSV files, specify "CSV". For datastore backups, specify "DATASTORE_BACKUP". For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For Avro, specify "AVRO". For parquet, specify "PARQUET". For orc, specify "ORC". The default value is CSV. */
 sourceFormat?: string;
 /** [Required] The fully-qualified URIs that point to your data in Google Cloud. For Google Cloud Storage URIs: Each URI can contain one '*' wildcard character and it must come after the 'bucket' name. Size limits related to load jobs apply to external data sources. For Google Cloud Bigtable URIs: Exactly one URI can be specified and it has be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore backups: Exactly one URI can be specified. Also, the '*' wildcard character is not allowed. */
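The two hunks above mirror the `CsvOptions` additions on `JobConfigurationLoad` (note that `skipLeadingRows` is a number here, not a string). A sketch combining both new fields in a load configuration; the bucket, project, dataset, and table names are placeholders:

```typescript
// Sketch: a CSV load job using both new JobConfigurationLoad fields.
const loadConfig: gapi.client.bigquery.JobConfigurationLoad = {
  sourceUris: ['gs://my-bucket/data-*.csv'],
  sourceFormat: 'CSV',
  skipLeadingRows: 1, // number-typed here, unlike CsvOptions
  destinationTable: {
    projectId: 'my-project',
    datasetId: 'my_dataset',
    tableId: 'my_table',
  },
  // Use instead of nullMarker when several strings should mean NULL.
  nullMarkers: ['\\N', 'NULL'],
  // Read the header row and match columns to the schema by name.
  sourceColumnMatch: 'NAME',
};
```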
@@ -1805,6 +1815,8 @@ declare namespace gapi.client {
 queryParameters?: QueryParameter[];
 /** Optional. A unique user provided identifier to ensure idempotent behavior for queries. Note that this is different from the job_id. It has the following properties: 1. It is case-sensitive, limited to up to 36 ASCII characters. A UUID is recommended. 2. Read only queries can ignore this token since they are nullipotent by definition. 3. For the purposes of idempotency ensured by the request_id, a request is considered duplicate of another only if they have the same request_id and are actually duplicates. When determining whether a request is a duplicate of another request, all parameters in the request that may affect the result are considered. For example, query, connection_properties, query_parameters, use_legacy_sql are parameters that affect the result and are considered when determining whether a request is a duplicate, but properties like timeout_ms don't affect the result and are thus not considered. Dry run query requests are never considered duplicate of another request. 4. When a duplicate mutating query request is detected, it returns: a. the results of the mutation if it completes successfully within the timeout. b. the running operation if it is still in progress at the end of the timeout. 5. Its lifetime is limited to 15 minutes. In other words, if two requests are sent with the same request_id, but more than 15 minutes apart, idempotency is not guaranteed. */
 requestId?: string;
+/** Optional. The reservation that jobs.query request would use. User can specify a reservation to execute the job.query. The expected format is `projects/{project}/locations/{location}/reservations/{reservation}`. */
+reservation?: string;
 /** Optional. Optional: Specifies the maximum amount of time, in milliseconds, that the client is willing to wait for the query to complete. By default, this limit is 10 seconds (10,000 milliseconds). If the query is complete, the jobComplete field in the response is true. If the query has not yet completed, jobComplete is false. You can request a longer timeout period in the timeoutMs field. However, the call is not guaranteed to wait for the specified timeout; it typically returns after around 200 seconds (200,000 milliseconds), even if the query is not complete. If jobComplete is false, you can continue to wait for the query to complete by calling the getQueryResults method until the jobComplete field in the getQueryResults response is true. */
 timeoutMs?: number;
 /** Specifies whether to use BigQuery's legacy SQL dialect for this query. The default value is true. If set to false, the query will use BigQuery's GoogleSQL: https://cloud.google.com/bigquery/sql-reference/ When useLegacySql is set to false, the value of flattenResults is ignored; query will be run as if flattenResults is false. */
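Finally, `QueryRequest` gains the same `reservation` field for stateless `jobs.query` calls. A sketch of a request body; the call shape assumes the usual gapi `{ params, resource }` overload emitted by this generator, and all identifiers are placeholders:

```typescript
// Sketch: jobs.query request body carrying the new reservation field.
const body: gapi.client.bigquery.QueryRequest = {
  query: 'SELECT COUNT(*) FROM `my_dataset.my_table`',
  useLegacySql: false,
  requestId: crypto.randomUUID(), // idempotency token, per the docs above
  reservation: 'projects/my-project/locations/us/reservations/my-reservation',
};
const res = await gapi.client.bigquery.jobs.query({
  projectId: 'my-project',
  resource: body,
});
console.log(res.result.totalRows);
```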