@maxim_mazurok/gapi.client.bigquery-v2 0.0.20250128 → 0.0.20250302

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/index.d.ts +39 -7
  2. package/package.json +1 -1
package/index.d.ts CHANGED
@@ -9,7 +9,7 @@
  // This file was generated by https://github.com/Maxim-Mazurok/google-api-typings-generator. Please do not edit it manually.
  // In case of any problems please post issue to https://github.com/Maxim-Mazurok/google-api-typings-generator
  // Generated from: https://bigquery.googleapis.com/$discovery/rest?version=v2
- // Revision: 20250128
+ // Revision: 20250302

  /// <reference types="gapi.client" />

@@ -37,7 +37,7 @@ declare namespace gapi.client {
  recall?: number;
  /** Area Under a ROC Curve. For multiclass this is a macro-averaged metric. */
  rocAuc?: number;
- /** Threshold at which the metrics are computed. For binary classification models this is the positive class threshold. For multi-class classfication models this is the confidence threshold. */
+ /** Threshold at which the metrics are computed. For binary classification models this is the positive class threshold. For multi-class classification models this is the confidence threshold. */
  threshold?: number;
  }
  interface AggregationThresholdPolicy {
@@ -369,12 +369,16 @@ declare namespace gapi.client {
  fieldDelimiter?: string;
  /** Optional. Specifies a string that represents a null value in a CSV file. For example, if you specify "\N", BigQuery interprets "\N" as a null value when querying a CSV file. The default value is the empty string. If you set this property to a custom value, BigQuery throws an error if an empty string is present for all data types except for STRING and BYTE. For STRING and BYTE columns, BigQuery interprets the empty string as an empty value. */
  nullMarker?: string;
+ /** Optional. A list of strings represented as SQL NULL value in a CSV file. null_marker and null_markers can't be set at the same time. If null_marker is set, null_markers has to be not set. If null_markers is set, null_marker has to be not set. If both null_marker and null_markers are set at the same time, a user error would be thrown. Any strings listed in null_markers, including empty string would be interpreted as SQL NULL. This applies to all column types. */
+ nullMarkers?: string[];
  /** Optional. Indicates if the embedded ASCII control characters (the first 32 characters in the ASCII-table, from '\x00' to '\x1F') are preserved. */
  preserveAsciiControlCharacters?: boolean;
  /** Optional. The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ("). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true. To include the specific quote character within a quoted value, precede it with an additional matching quote character. For example, if you want to escape the default character ' " ', use ' "" '. */
  quote?: string;
  /** Optional. The number of rows at the top of a CSV file that BigQuery will skip when reading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped. When autodetect is on, the behavior is the following: * skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected, the row is read as data. Otherwise data is read starting from the second row. * skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row. * skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected, row N is just skipped. Otherwise row N is used to extract column names for the detected schema. */
  skipLeadingRows?: string;
+ /** Optional. Controls the strategy used to match loaded columns to the schema. If not set, a sensible default is chosen based on how the schema is provided. If autodetect is used, then columns are matched by name. Otherwise, columns are matched by position. This is done to keep the behavior backward-compatible. Acceptable values are: POSITION - matches by position. This assumes that the columns are ordered the same way as the schema. NAME - matches by name. This reads the header row as column names and reorders columns to match the field names in the schema. */
+ sourceColumnMatch?: string;
  }
  interface DataFormatOptions {
  /** Optional. Output timestamp as usec int64. Default is false. */
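A minimal sketch of how the new `nullMarkers` and `sourceColumnMatch` fields slot into a `CsvOptions` value, assuming the interfaces remain exposed under the `gapi.client.bigquery` namespace as in previous releases; all values are illustrative placeholders, not defaults mandated by the API:

```typescript
// Illustrative only: exercises the CsvOptions fields added in revision 20250302.
const csvOptions: gapi.client.bigquery.CsvOptions = {
  fieldDelimiter: ",",
  skipLeadingRows: "1",
  // New: treat any of these strings as SQL NULL (cannot be combined with nullMarker).
  nullMarkers: ["\\N", "NULL", ""],
  // New: match loaded columns to the schema by header name instead of by position.
  sourceColumnMatch: "NAME",
};
```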
@@ -732,7 +736,11 @@ declare namespace gapi.client {
  connectionId?: string;
  /** Optional. Additional properties to set if sourceFormat is set to CSV. */
  csvOptions?: CsvOptions;
- /** Defines the list of possible SQL data types to which the source decimal values are converted. This list and the precision and the scale parameters of the decimal field determine the target type. In the order of NUMERIC, BIGNUMERIC, and STRING, a type is picked if it is in the specified list and if it supports the precision and the scale. STRING supports all precision and scale values. If none of the listed types supports the precision and the scale, the type supporting the widest range in the specified list is picked, and if a value exceeds the supported range when reading the data, an error will be thrown. Example: Suppose the value of this field is ["NUMERIC", "BIGNUMERIC"]. If (precision,scale) is: * (38,9) -> NUMERIC; * (39,9) -> BIGNUMERIC (NUMERIC cannot hold 30 integer digits); * (38,10) -> BIGNUMERIC (NUMERIC cannot hold 10 fractional digits); * (76,38) -> BIGNUMERIC; * (77,38) -> BIGNUMERIC (error if value exeeds supported range). This field cannot contain duplicate types. The order of the types in this field is ignored. For example, ["BIGNUMERIC", "NUMERIC"] is the same as ["NUMERIC", "BIGNUMERIC"] and NUMERIC always takes precedence over BIGNUMERIC. Defaults to ["NUMERIC", "STRING"] for ORC and ["NUMERIC"] for the other file formats. */
+ /** Optional. Format used to parse DATE values. Supports C-style and SQL-style values. */
+ dateFormat?: string;
+ /** Optional. Format used to parse DATETIME values. Supports C-style and SQL-style values. */
+ datetimeFormat?: string;
+ /** Defines the list of possible SQL data types to which the source decimal values are converted. This list and the precision and the scale parameters of the decimal field determine the target type. In the order of NUMERIC, BIGNUMERIC, and STRING, a type is picked if it is in the specified list and if it supports the precision and the scale. STRING supports all precision and scale values. If none of the listed types supports the precision and the scale, the type supporting the widest range in the specified list is picked, and if a value exceeds the supported range when reading the data, an error will be thrown. Example: Suppose the value of this field is ["NUMERIC", "BIGNUMERIC"]. If (precision,scale) is: * (38,9) -> NUMERIC; * (39,9) -> BIGNUMERIC (NUMERIC cannot hold 30 integer digits); * (38,10) -> BIGNUMERIC (NUMERIC cannot hold 10 fractional digits); * (76,38) -> BIGNUMERIC; * (77,38) -> BIGNUMERIC (error if value exceeds supported range). This field cannot contain duplicate types. The order of the types in this field is ignored. For example, ["BIGNUMERIC", "NUMERIC"] is the same as ["NUMERIC", "BIGNUMERIC"] and NUMERIC always takes precedence over BIGNUMERIC. Defaults to ["NUMERIC", "STRING"] for ORC and ["NUMERIC"] for the other file formats. */
  decimalTargetTypes?: string[];
  /** Optional. Specifies how source URIs are interpreted for constructing the file set to load. By default source URIs are expanded against the underlying storage. Other options include specifying manifest files. Only applicable to object storage systems. */
  fileSetSpecType?: string;
@@ -762,6 +770,12 @@ declare namespace gapi.client {
  sourceFormat?: string;
  /** [Required] The fully-qualified URIs that point to your data in Google Cloud. For Google Cloud Storage URIs: Each URI can contain one '*' wildcard character and it must come after the 'bucket' name. Size limits related to load jobs apply to external data sources. For Google Cloud Bigtable URIs: Exactly one URI can be specified and it has be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore backups, exactly one URI can be specified. Also, the '*' wildcard character is not allowed. */
  sourceUris?: string[];
+ /** Optional. Format used to parse TIME values. Supports C-style and SQL-style values. */
+ timeFormat?: string;
+ /** Optional. Format used to parse TIMESTAMP values. Supports C-style and SQL-style values. */
+ timestampFormat?: string;
+ /** Optional. Time zone used when parsing timestamp values that do not have specific time zone information (e.g. 2024-04-20 12:34:56). The expected format is a IANA timezone string (e.g. America/Los_Angeles). */
+ timeZone?: string;
  }
  interface ExternalDatasetReference {
  /** Required. The connection id that is used to access the external_source. Format: projects/{project_id}/locations/{location_id}/connections/{connection_id} */
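A sketch of an `ExternalDataConfiguration` exercising the new temporal parsing fields; the bucket URI, format strings, and time zone are hypothetical placeholders (the API accepts C-style and SQL-style format values), and the `gapi.client.bigquery` namespace path is assumed from previous releases:

```typescript
// Illustrative only: external CSV table definition using the fields added in 20250302.
const externalConfig: gapi.client.bigquery.ExternalDataConfiguration = {
  sourceFormat: "CSV",
  sourceUris: ["gs://example-bucket/data/*.csv"],
  // New: explicit parse formats for temporal columns (C-style shown here).
  dateFormat: "%Y-%m-%d",
  timestampFormat: "%Y-%m-%d %H:%M:%S",
  // New: IANA time zone applied to timestamps that carry no zone information.
  timeZone: "America/Los_Angeles",
};
```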
@@ -1084,7 +1098,11 @@ declare namespace gapi.client {
  createDisposition?: string;
  /** Optional. If this property is true, the job creates a new session using a randomly generated session_id. To continue using a created session with subsequent queries, pass the existing session identifier as a `ConnectionProperty` value. The session identifier is returned as part of the `SessionInfo` message within the query statistics. The new session's location will be set to `Job.JobReference.location` if it is present, otherwise it's set to the default location based on existing routing logic. */
  createSession?: boolean;
- /** Defines the list of possible SQL data types to which the source decimal values are converted. This list and the precision and the scale parameters of the decimal field determine the target type. In the order of NUMERIC, BIGNUMERIC, and STRING, a type is picked if it is in the specified list and if it supports the precision and the scale. STRING supports all precision and scale values. If none of the listed types supports the precision and the scale, the type supporting the widest range in the specified list is picked, and if a value exceeds the supported range when reading the data, an error will be thrown. Example: Suppose the value of this field is ["NUMERIC", "BIGNUMERIC"]. If (precision,scale) is: * (38,9) -> NUMERIC; * (39,9) -> BIGNUMERIC (NUMERIC cannot hold 30 integer digits); * (38,10) -> BIGNUMERIC (NUMERIC cannot hold 10 fractional digits); * (76,38) -> BIGNUMERIC; * (77,38) -> BIGNUMERIC (error if value exeeds supported range). This field cannot contain duplicate types. The order of the types in this field is ignored. For example, ["BIGNUMERIC", "NUMERIC"] is the same as ["NUMERIC", "BIGNUMERIC"] and NUMERIC always takes precedence over BIGNUMERIC. Defaults to ["NUMERIC", "STRING"] for ORC and ["NUMERIC"] for the other file formats. */
+ /** Optional. Date format used for parsing DATE values. */
+ dateFormat?: string;
+ /** Optional. Date format used for parsing DATETIME values. */
+ datetimeFormat?: string;
+ /** Defines the list of possible SQL data types to which the source decimal values are converted. This list and the precision and the scale parameters of the decimal field determine the target type. In the order of NUMERIC, BIGNUMERIC, and STRING, a type is picked if it is in the specified list and if it supports the precision and the scale. STRING supports all precision and scale values. If none of the listed types supports the precision and the scale, the type supporting the widest range in the specified list is picked, and if a value exceeds the supported range when reading the data, an error will be thrown. Example: Suppose the value of this field is ["NUMERIC", "BIGNUMERIC"]. If (precision,scale) is: * (38,9) -> NUMERIC; * (39,9) -> BIGNUMERIC (NUMERIC cannot hold 30 integer digits); * (38,10) -> BIGNUMERIC (NUMERIC cannot hold 10 fractional digits); * (76,38) -> BIGNUMERIC; * (77,38) -> BIGNUMERIC (error if value exceeds supported range). This field cannot contain duplicate types. The order of the types in this field is ignored. For example, ["BIGNUMERIC", "NUMERIC"] is the same as ["NUMERIC", "BIGNUMERIC"] and NUMERIC always takes precedence over BIGNUMERIC. Defaults to ["NUMERIC", "STRING"] for ORC and ["NUMERIC"] for the other file formats. */
  decimalTargetTypes?: string[];
  /** Custom encryption configuration (e.g., Cloud KMS keys) */
  destinationEncryptionConfiguration?: EncryptionConfiguration;
@@ -1108,6 +1126,8 @@ declare namespace gapi.client {
  maxBadRecords?: number;
  /** Optional. Specifies a string that represents a null value in a CSV file. For example, if you specify "\N", BigQuery interprets "\N" as a null value when loading a CSV file. The default value is the empty string. If you set this property to a custom value, BigQuery throws an error if an empty string is present for all data types except for STRING and BYTE. For STRING and BYTE columns, BigQuery interprets the empty string as an empty value. */
  nullMarker?: string;
+ /** Optional. A list of strings represented as SQL NULL value in a CSV file. null_marker and null_markers can't be set at the same time. If null_marker is set, null_markers has to be not set. If null_markers is set, null_marker has to be not set. If both null_marker and null_markers are set at the same time, a user error would be thrown. Any strings listed in null_markers, including empty string would be interpreted as SQL NULL. This applies to all column types. */
+ nullMarkers?: string[];
  /** Optional. Additional properties to set if sourceFormat is set to PARQUET. */
  parquetOptions?: ParquetOptions;
  /** Optional. When sourceFormat is set to "CSV", this indicates whether the embedded ASCII control characters (the first 32 characters in the ASCII-table, from '\x00' to '\x1F') are preserved. */
@@ -1130,12 +1150,20 @@ declare namespace gapi.client {
  schemaUpdateOptions?: string[];
  /** Optional. The number of rows at the top of a CSV file that BigQuery will skip when loading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped. When autodetect is on, the behavior is the following: * skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected, the row is read as data. Otherwise data is read starting from the second row. * skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row. * skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected, row N is just skipped. Otherwise row N is used to extract column names for the detected schema. */
  skipLeadingRows?: number;
+ /** Optional. Controls the strategy used to match loaded columns to the schema. If not set, a sensible default is chosen based on how the schema is provided. If autodetect is used, then columns are matched by name. Otherwise, columns are matched by position. This is done to keep the behavior backward-compatible. */
+ sourceColumnMatch?: string;
  /** Optional. The format of the data files. For CSV files, specify "CSV". For datastore backups, specify "DATASTORE_BACKUP". For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For Avro, specify "AVRO". For parquet, specify "PARQUET". For orc, specify "ORC". The default value is CSV. */
  sourceFormat?: string;
  /** [Required] The fully-qualified URIs that point to your data in Google Cloud. For Google Cloud Storage URIs: Each URI can contain one '*' wildcard character and it must come after the 'bucket' name. Size limits related to load jobs apply to external data sources. For Google Cloud Bigtable URIs: Exactly one URI can be specified and it has be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore backups: Exactly one URI can be specified. Also, the '*' wildcard character is not allowed. */
  sourceUris?: string[];
+ /** Optional. Date format used for parsing TIME values. */
+ timeFormat?: string;
  /** Time-based partitioning specification for the destination table. Only one of timePartitioning and rangePartitioning should be specified. */
  timePartitioning?: TimePartitioning;
+ /** Optional. Date format used for parsing TIMESTAMP values. */
+ timestampFormat?: string;
+ /** Optional. [Experimental] Default time zone that will apply when parsing timestamp values that have no specific time zone. */
+ timeZone?: string;
  /** Optional. If sourceFormat is set to "AVRO", indicates whether to interpret logical types as the corresponding BigQuery data type (for example, TIMESTAMP), instead of using the raw type (for example, INTEGER). */
  useAvroLogicalTypes?: boolean;
  /** Optional. Specifies the action that occurs if the destination table already exists. The following values are supported: * WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the data, removes the constraints and uses the schema from the load job. * WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. * WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. The default value is WRITE_APPEND. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. */
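The same additions appear on `JobConfigurationLoad`. A minimal sketch of a typed load configuration using them, with hypothetical project, dataset, table, and bucket names, and the `gapi.client.bigquery` namespace path assumed from previous releases:

```typescript
// Illustrative only: load-job configuration exercising the fields added in 20250302.
const loadConfig: gapi.client.bigquery.JobConfigurationLoad = {
  destinationTable: { projectId: "my-project", datasetId: "my_dataset", tableId: "events" },
  sourceUris: ["gs://example-bucket/events/*.csv"],
  sourceFormat: "CSV",
  skipLeadingRows: 1,
  // New: multiple NULL markers and name-based column matching.
  nullMarkers: ["\\N", "NULL"],
  sourceColumnMatch: "NAME",
  // New: temporal parse format and an experimental default time zone.
  timestampFormat: "%Y-%m-%d %H:%M:%S",
  timeZone: "UTC",
};
```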
@@ -1755,12 +1783,16 @@ declare namespace gapi.client {
  createSession?: boolean;
  /** Optional. Specifies the default datasetId and projectId to assume for any unqualified table names in the query. If not set, all table names in the query string must be qualified in the format 'datasetId.tableId'. */
  defaultDataset?: DatasetReference;
+ /** Optional. Custom encryption configuration (e.g., Cloud KMS keys) */
+ destinationEncryptionConfiguration?: EncryptionConfiguration;
  /** Optional. If set to true, BigQuery doesn't run the job. Instead, if the query is valid, BigQuery returns statistics about the job such as how many bytes would be processed. If the query is invalid, an error returns. The default value is false. */
  dryRun?: boolean;
  /** Optional. Output format adjustments. */
  formatOptions?: DataFormatOptions;
  /** Optional. If not set, jobs are always required. If set, the query request will follow the behavior described JobCreationMode. [Preview](https://cloud.google.com/products/#product-launch-stages) */
  jobCreationMode?: string;
+ /** Optional. Job timeout in milliseconds. If this time limit is exceeded, BigQuery will attempt to stop a longer job, but may not always succeed in canceling it before the job completes. For example, a job that takes more than 60 seconds to complete has a better chance of being stopped than a job that takes 10 seconds to complete. This timeout applies to the query even if a job does not need to be created. */
+ jobTimeoutMs?: string;
  /** The resource type of the request. */
  kind?: string;
  /** Optional. The labels associated with this query. Labels can be used to organize and group query jobs. Label keys and values can be no longer than 63 characters, can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. Label keys must start with a letter and each label in the list must have a different key. */
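A short sketch of a `QueryRequest` using the newly added `destinationEncryptionConfiguration` and `jobTimeoutMs` fields; the query text and the KMS key name are hypothetical placeholders, and the `gapi.client.bigquery` namespace path is assumed from previous releases:

```typescript
// Illustrative only: query request exercising the fields added in 20250302.
const queryRequest: gapi.client.bigquery.QueryRequest = {
  query: "SELECT COUNT(*) FROM `my_dataset.events`",
  useLegacySql: false,
  // New: timeout (ms, as an int64 string) that applies even when no job is created.
  jobTimeoutMs: "30000",
  // New: CMEK settings for the query destination.
  destinationEncryptionConfiguration: {
    kmsKeyName: "projects/my-project/locations/us/keyRings/my-ring/cryptoKeys/my-key",
  },
};
```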
@@ -2441,7 +2473,7 @@ declare namespace gapi.client {
  type?: string;
  /** Information about a logical view. */
  view?: {
- /** Specifices the privacy policy for the view. */
+ /** Specifies the privacy policy for the view. */
  privacyPolicy?: PrivacyPolicy;
  /** True if view is defined in legacy SQL dialect, false if in GoogleSQL. */
  useLegacySql?: boolean;
@@ -2751,7 +2783,7 @@ declare namespace gapi.client {
  interface ViewDefinition {
  /** Optional. Foreign view representations. */
  foreignDefinitions?: ForeignViewDefinition[];
- /** Optional. Specifices the privacy policy for the view. */
+ /** Optional. Specifies the privacy policy for the view. */
  privacyPolicy?: PrivacyPolicy;
  /** Required. A query that BigQuery executes when the view is referenced. */
  query?: string;
@@ -2903,7 +2935,7 @@ declare namespace gapi.client {
  callback?: string;
  /** Selector specifying which fields to include in a partial response. */
  fields?: string;
- /** An expression for filtering the results of the request by label. The syntax is `labels.[:]`. Multiple filters can be ANDed together by connecting with a space. Example: `labels.department:receiving labels.active`. See [Filtering datasets using labels](https://cloud.google.com/bigquery/docs/filtering-labels#filtering_datasets_using_labels) for details. */
+ /** An expression for filtering the results of the request by label. The syntax is `labels.[:]`. Multiple filters can be AND-ed together by connecting with a space. Example: `labels.department:receiving labels.active`. See [Filtering datasets using labels](https://cloud.google.com/bigquery/docs/filtering-labels#filtering_datasets_using_labels) for details. */
  filter?: string;
  /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
  key?: string;
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@maxim_mazurok/gapi.client.bigquery-v2",
- "version": "0.0.20250128",
+ "version": "0.0.20250302",
  "description": "TypeScript typings for BigQuery API v2",
  "repository": {
  "type": "git",