@maxim_mazurok/gapi.client.dataproc-v1 0.0.20240830 → 0.0.20240928

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/index.d.ts +2419 -127
  2. package/package.json +1 -1
package/index.d.ts CHANGED
@@ -9,7 +9,7 @@
  // This file was generated by https://github.com/Maxim-Mazurok/google-api-typings-generator. Please do not edit it manually.
  // In case of any problems please post issue to https://github.com/Maxim-Mazurok/google-api-typings-generator
  // Generated from: https://dataproc.googleapis.com/$discovery/rest?version=v1
- // Revision: 20240830
+ // Revision: 20240928

  /// <reference types="gapi.client" />

@@ -30,6 +30,68 @@ declare namespace gapi.client {
  /** Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. */
  acceleratorTypeUri?: string;
  }
+ interface AccessSessionSparkApplicationEnvironmentInfoResponse {
+ /** Details about the Environment that the application is running in. */
+ applicationEnvironmentInfo?: ApplicationEnvironmentInfo;
+ }
+ interface AccessSessionSparkApplicationJobResponse {
+ /** Output only. Data corresponding to a spark job. */
+ jobData?: JobData;
+ }
+ interface AccessSessionSparkApplicationResponse {
+ /** Output only. High level information corresponding to an application. */
+ application?: ApplicationInfo;
+ }
+ interface AccessSessionSparkApplicationSqlQueryResponse {
+ /** SQL Execution Data */
+ executionData?: SqlExecutionUiData;
+ }
+ interface AccessSessionSparkApplicationSqlSparkPlanGraphResponse {
+ /** SparkPlanGraph for a Spark Application execution. */
+ sparkPlanGraph?: SparkPlanGraph;
+ }
+ interface AccessSessionSparkApplicationStageAttemptResponse {
+ /** Output only. Data corresponding to a stage. */
+ stageData?: StageData;
+ }
+ interface AccessSessionSparkApplicationStageRddOperationGraphResponse {
+ /** RDD operation graph for a Spark Application Stage. */
+ rddOperationGraph?: RddOperationGraph;
+ }
+ interface AccessSparkApplicationEnvironmentInfoResponse {
+ /** Details about the Environment that the application is running in. */
+ applicationEnvironmentInfo?: ApplicationEnvironmentInfo;
+ }
+ interface AccessSparkApplicationJobResponse {
+ /** Output only. Data corresponding to a spark job. */
+ jobData?: JobData;
+ }
+ interface AccessSparkApplicationResponse {
+ /** Output only. High level information corresponding to an application. */
+ application?: ApplicationInfo;
+ }
+ interface AccessSparkApplicationSqlQueryResponse {
+ /** SQL Execution Data */
+ executionData?: SqlExecutionUiData;
+ }
+ interface AccessSparkApplicationSqlSparkPlanGraphResponse {
+ /** SparkPlanGraph for a Spark Application execution. */
+ sparkPlanGraph?: SparkPlanGraph;
+ }
+ interface AccessSparkApplicationStageAttemptResponse {
+ /** Output only. Data corresponding to a stage. */
+ stageData?: StageData;
+ }
+ interface AccessSparkApplicationStageRddOperationGraphResponse {
+ /** RDD operation graph for a Spark Application Stage. */
+ rddOperationGraph?: RddOperationGraph;
+ }
+ interface AccumulableInfo {
+ accumullableInfoId?: string;
+ name?: string;
+ update?: string;
+ value?: string;
+ }
  interface AnalyzeBatchRequest {
  /** Optional. A unique ID used to identify the request. If the service receives two AnalyzeBatchRequest (http://cloud/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.AnalyzeBatchRequest)s with the same request_id, the second request is ignored and the Operation that corresponds to the first request created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The value must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters. */
  requestId?: string;
@@ -52,6 +114,40 @@
  /** Output only. Warnings encountered during operation execution. */
  warnings?: string[];
  }
+ interface ApplicationAttemptInfo {
+ appSparkVersion?: string;
+ attemptId?: string;
+ completed?: boolean;
+ durationMillis?: string;
+ endTime?: string;
+ lastUpdated?: string;
+ sparkUser?: string;
+ startTime?: string;
+ }
+ interface ApplicationEnvironmentInfo {
+ classpathEntries?: {[P in string]: string};
+ hadoopProperties?: {[P in string]: string};
+ metricsProperties?: {[P in string]: string};
+ resourceProfiles?: ResourceProfileInfo[];
+ runtime?: SparkRuntimeInfo;
+ sparkProperties?: {[P in string]: string};
+ systemProperties?: {[P in string]: string};
+ }
+ interface ApplicationInfo {
+ applicationContextIngestionStatus?: string;
+ applicationId?: string;
+ attempts?: ApplicationAttemptInfo[];
+ coresGranted?: number;
+ coresPerExecutor?: number;
+ maxCores?: number;
+ memoryPerExecutorMb?: number;
+ name?: string;
+ quantileDataStatus?: string;
+ }
+ interface AppSummary {
+ numCompletedJobs?: number;
+ numCompletedStages?: number;
+ }
  interface AutoscalingConfig {
  /** Optional. The autoscaling policy used by the cluster.Only resource names including projectid and location (region) are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id] projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]Note that the policy must be in the same project and Dataproc region. */
  policyUri?: string;
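
Editor's note: the `{[P in string]: string}` mapped types on ApplicationEnvironmentInfo above are how this generator encodes protobuf `map<string, string>` fields; they behave like plain string-keyed records. A minimal sketch, with made-up property entries (interface names are referenced unqualified for brevity):

    const env: ApplicationEnvironmentInfo = {
      // arbitrary illustrative entries; any string keys and values are accepted
      sparkProperties: {'spark.executor.memory': '4g'},
      systemProperties: {'java.specification.version': '17'},
    };
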
@@ -284,10 +380,33 @@
  /** Output only. Additional state information that includes status reported by the agent. */
  substate?: string;
  }
+ interface ClusterToRepair {
+ /** Required. Repair action to take on the cluster resource. */
+ clusterRepairAction?: string;
+ }
  interface ConfidentialInstanceConfig {
  /** Optional. Defines whether the instance should have confidential compute enabled. */
  enableConfidentialCompute?: boolean;
  }
+ interface ConsolidatedExecutorSummary {
+ activeTasks?: number;
+ completedTasks?: number;
+ count?: number;
+ diskUsed?: string;
+ failedTasks?: number;
+ isExcluded?: number;
+ maxMemory?: string;
+ memoryMetrics?: MemoryMetrics;
+ memoryUsed?: string;
+ rddBlocks?: number;
+ totalCores?: number;
+ totalDurationMillis?: string;
+ totalGcTimeMillis?: string;
+ totalInputBytes?: string;
+ totalShuffleRead?: string;
+ totalShuffleWrite?: string;
+ totalTasks?: number;
+ }
  interface DataprocMetricConfig {
  /** Required. Metrics sources to enable. */
  metrics?: Metric[];
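
Editor's note: ConsolidatedExecutorSummary mixes `number` and `string` fields deliberately. Following the proto3 JSON mapping, 32-bit integers arrive as JSON numbers, while 64-bit counters (diskUsed, maxMemory, totalGcTimeMillis, and so on) arrive as decimal strings. A small conversion sketch; the helper name is ours, not part of the API:

    // totalGcTimeMillis is an int64 serialized as a decimal string
    function totalGcSeconds(summary: ConsolidatedExecutorSummary): number {
      return Number(summary.totalGcTimeMillis ?? '0') / 1000;
    }
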
@@ -369,6 +488,89 @@
  /** Optional. The duration after which the workload will be terminated, specified as the JSON representation for Duration (https://protobuf.dev/programming-guides/proto3/#json). When the workload exceeds this duration, it will be unconditionally terminated without waiting for ongoing work to finish. If ttl is not specified for a batch workload, the workload will be allowed to run until it exits naturally (or run forever without exiting). If ttl is not specified for an interactive session, it defaults to 24 hours. If ttl is not specified for a batch that uses 2.1+ runtime version, it defaults to 4 hours. Minimum value is 10 minutes; maximum value is 14 days. If both ttl and idle_ttl are specified (for an interactive session), the conditions are treated as OR conditions: the workload will be terminated when it has been idle for idle_ttl or when ttl has been exceeded, whichever occurs first. */
  ttl?: string;
  }
+ interface ExecutorMetrics {
+ metrics?: {[P in string]: string};
+ }
+ interface ExecutorMetricsDistributions {
+ diskBytesSpilled?: number[];
+ failedTasks?: number[];
+ inputBytes?: number[];
+ inputRecords?: number[];
+ killedTasks?: number[];
+ memoryBytesSpilled?: number[];
+ outputBytes?: number[];
+ outputRecords?: number[];
+ peakMemoryMetrics?: ExecutorPeakMetricsDistributions;
+ quantiles?: number[];
+ shuffleRead?: number[];
+ shuffleReadRecords?: number[];
+ shuffleWrite?: number[];
+ shuffleWriteRecords?: number[];
+ succeededTasks?: number[];
+ taskTimeMillis?: number[];
+ }
+ interface ExecutorPeakMetricsDistributions {
+ executorMetrics?: ExecutorMetrics[];
+ quantiles?: number[];
+ }
+ interface ExecutorResourceRequest {
+ amount?: string;
+ discoveryScript?: string;
+ resourceName?: string;
+ vendor?: string;
+ }
+ interface ExecutorStageSummary {
+ diskBytesSpilled?: string;
+ executorId?: string;
+ failedTasks?: number;
+ inputBytes?: string;
+ inputRecords?: string;
+ isExcludedForStage?: boolean;
+ killedTasks?: number;
+ memoryBytesSpilled?: string;
+ outputBytes?: string;
+ outputRecords?: string;
+ peakMemoryMetrics?: ExecutorMetrics;
+ shuffleRead?: string;
+ shuffleReadRecords?: string;
+ shuffleWrite?: string;
+ shuffleWriteRecords?: string;
+ stageAttemptId?: number;
+ stageId?: string;
+ succeededTasks?: number;
+ taskTimeMillis?: string;
+ }
+ interface ExecutorSummary {
+ activeTasks?: number;
+ addTime?: string;
+ attributes?: {[P in string]: string};
+ completedTasks?: number;
+ diskUsed?: string;
+ excludedInStages?: string[];
+ executorId?: string;
+ executorLogs?: {[P in string]: string};
+ failedTasks?: number;
+ hostPort?: string;
+ isActive?: boolean;
+ isExcluded?: boolean;
+ maxMemory?: string;
+ maxTasks?: number;
+ memoryMetrics?: MemoryMetrics;
+ memoryUsed?: string;
+ peakMemoryMetrics?: ExecutorMetrics;
+ rddBlocks?: number;
+ removeReason?: string;
+ removeTime?: string;
+ resourceProfileId?: number;
+ resources?: {[P in string]: ResourceInformation};
+ totalCores?: number;
+ totalDurationMillis?: string;
+ totalGcTimeMillis?: string;
+ totalInputBytes?: string;
+ totalShuffleRead?: string;
+ totalShuffleWrite?: string;
+ totalTasks?: number;
+ }
  interface Expr {
  /** Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. */
  description?: string;
@@ -390,7 +592,7 @@
  mainClass?: string;
  /** The HCFS URI of the jar file that contains the main class. */
  mainJarFileUri?: string;
- /** Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might beoverwritten. Can include properties set in/etc/flink/conf/flink-defaults.conf and classes in user code. */
+ /** Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/flink/conf/flink-defaults.conf and classes in user code. */
  properties?: {[P in string]: string};
  /** Optional. HCFS URI of the savepoint, which contains the last saved progress for starting the current job. */
  savepointUri?: string;
@@ -531,11 +733,21 @@
  /** Required. The encrypted credentials being injected in to the cluster.The client is responsible for encrypting the credentials in a way that is supported by the cluster.A wrapped value is used here so that the actual contents of the encrypted credentials are not written to audit logs. */
  credentialsCiphertext?: string;
  }
+ interface InputMetrics {
+ bytesRead?: string;
+ recordsRead?: string;
+ }
+ interface InputQuantileMetrics {
+ bytesRead?: Quantiles;
+ recordsRead?: Quantiles;
+ }
  interface InstanceFlexibilityPolicy {
  /** Optional. List of instance selection options that the group will use when creating new VMs. */
  instanceSelectionList?: InstanceSelection[];
  /** Output only. A list of instance selection results in the group. */
  instanceSelectionResults?: InstanceSelectionResult[];
+ /** Optional. Defines how the Group selects the provisioning model to ensure required reliability. */
+ provisioningModelMix?: ProvisioningModelMix;
  }
  interface InstanceGroupAutoscalingPolicyConfig {
  /** Required. Maximum number of instances for this group. Required for primary workers. Note that by default, clusters will not use secondary workers. Required for secondary workers if the minimum secondary instances is set.Primary workers - Bounds: [min_instances, ). Secondary workers - Bounds: [min_instances, ). Default: 0. */
@@ -657,6 +869,30 @@
  /** Output only. The collection of YARN applications spun up by this job.Beta Feature: This report is available for testing purposes only. It might be changed before final release. */
  yarnApplications?: YarnApplication[];
  }
+ interface JobData {
+ completionTime?: string;
+ description?: string;
+ jobGroup?: string;
+ jobId?: string;
+ killTasksSummary?: {[P in string]: number};
+ name?: string;
+ numActiveStages?: number;
+ numActiveTasks?: number;
+ numCompletedIndices?: number;
+ numCompletedStages?: number;
+ numCompletedTasks?: number;
+ numFailedStages?: number;
+ numFailedTasks?: number;
+ numKilledTasks?: number;
+ numSkippedStages?: number;
+ numSkippedTasks?: number;
+ numTasks?: number;
+ skippedStages?: number[];
+ sqlExecutionId?: string;
+ stageIds?: string[];
+ status?: string;
+ submissionTime?: string;
+ }
  interface JobMetadata {
  /** Output only. The job id. */
  jobId?: string;
@@ -687,6 +923,20 @@
  /** Optional. Maximum total number of times a driver can be restarted as a result of the driver exiting with a non-zero code. After the maximum number is reached, the job will be reported as failed.Maximum value is 240.Note: Currently, this restartable job option is not supported in Dataproc workflow templates (https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template). */
  maxFailuresTotal?: number;
  }
+ interface JobsSummary {
+ /** Number of active jobs */
+ activeJobs?: number;
+ /** Spark Application Id */
+ applicationId?: string;
+ /** Attempts info */
+ attempts?: ApplicationAttemptInfo[];
+ /** Number of completed jobs */
+ completedJobs?: number;
+ /** Number of failed jobs */
+ failedJobs?: number;
+ /** Spark Scheduling mode */
+ schedulingMode?: string;
+ }
  interface JobStatus {
  /** Optional. Output only. Job state details, such as an error description if the state is ERROR. */
  details?: string;
@@ -833,6 +1083,12 @@
  /** Output only. The name of the Instance Template used for the Managed Instance Group. */
  instanceTemplateName?: string;
  }
+ interface MemoryMetrics {
+ totalOffHeapStorageMemory?: string;
+ totalOnHeapStorageMemory?: string;
+ usedOffHeapStorageMemory?: string;
+ usedOnHeapStorageMemory?: string;
+ }
  interface MetastoreConfig {
  /** Required. Resource name of an existing Dataproc Metastore service.Example: projects/[project_id]/locations/[dataproc_region]/services/[service-name] */
  dataprocMetastoreService?: string;
@@ -850,7 +1106,7 @@
  targetGkeCluster?: string;
  }
  interface NodeGroup {
- /** Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labelsn. */
+ /** Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labels. */
  labels?: {[P in string]: string};
  /** The Node group resource name (https://aip.dev/122). */
  name?: string;
@@ -937,6 +1193,14 @@
  /** Optional. Job is a Trino job. */
  trinoJob?: TrinoJob;
  }
+ interface OutputMetrics {
+ bytesWritten?: string;
+ recordsWritten?: string;
+ }
+ interface OutputQuantileMetrics {
+ bytesWritten?: Quantiles;
+ recordsWritten?: Quantiles;
+ }
  interface ParameterValidation {
  /** Validation based on regular expressions. */
  regex?: RegexValidation;
@@ -973,6 +1237,10 @@
  /** Specifies the format of the policy.Valid values are 0, 1, and 3. Requests that specify an invalid value are rejected.Any operation that affects conditional role bindings must specify version 3. This requirement applies to the following operations: Getting a policy that includes a conditional role binding Adding a conditional role binding to a policy Changing a conditional role binding in a policy Removing any role binding, with or without a condition, from a policy that includes conditionsImportant: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies). */
  version?: number;
  }
+ interface PoolData {
+ name?: string;
+ stageIds?: string[];
+ }
  interface PrestoJob {
  /** Optional. Presto client tags to attach to this query */
  clientTags?: string[];
@@ -989,6 +1257,21 @@
  /** A list of queries. */
  queryList?: QueryList;
  }
+ interface ProcessSummary {
+ addTime?: string;
+ hostPort?: string;
+ isActive?: boolean;
+ processId?: string;
+ processLogs?: {[P in string]: string};
+ removeTime?: string;
+ totalCores?: number;
+ }
+ interface ProvisioningModelMix {
+ /** Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. */
+ standardCapacityBase?: number;
+ /** Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. */
+ standardCapacityPercentAboveBase?: number;
+ }
  interface PyPiRepositoryConfig {
  /** Optional. PyPi repository address */
  pypiRepository?: string;
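
Editor's note: the ProvisioningModelMix comments above imply a simple split rule. A sketch that reproduces the documented example (15 requested instances, standardCapacityBase 5, standardCapacityPercentAboveBase 30 gives 5 + 3 = 8 Standard VMs and 7 Spot VMs); the function and its rounding via Math.round are our illustration, not part of the API:

    function splitProvisioning(requested: number, mix: ProvisioningModelMix) {
      const base = Math.min(mix.standardCapacityBase ?? 0, requested);
      const aboveBase = requested - base;
      // the percentage applies only to capacity above the standard base
      const standardAboveBase =
        Math.round((aboveBase * (mix.standardCapacityPercentAboveBase ?? 0)) / 100);
      return {standard: base + standardAboveBase, spot: aboveBase - standardAboveBase};
    }

    splitProvisioning(15, {standardCapacityBase: 5, standardCapacityPercentAboveBase: 30});
    // => {standard: 8, spot: 7}
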
@@ -1025,15 +1308,79 @@
  /** Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip. */
  pythonFileUris?: string[];
  }
+ interface Quantiles {
+ count?: string;
+ maximum?: string;
+ minimum?: string;
+ percentile25?: string;
+ percentile50?: string;
+ percentile75?: string;
+ sum?: string;
+ }
  interface QueryList {
  /** Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } } */
  queries?: string[];
  }
+ interface RddDataDistribution {
+ address?: string;
+ diskUsed?: string;
+ memoryRemaining?: string;
+ memoryUsed?: string;
+ offHeapMemoryRemaining?: string;
+ offHeapMemoryUsed?: string;
+ onHeapMemoryRemaining?: string;
+ onHeapMemoryUsed?: string;
+ }
+ interface RddOperationCluster {
+ childClusters?: RddOperationCluster[];
+ childNodes?: RddOperationNode[];
+ name?: string;
+ rddClusterId?: string;
+ }
+ interface RddOperationEdge {
+ fromId?: number;
+ toId?: number;
+ }
+ interface RddOperationGraph {
+ edges?: RddOperationEdge[];
+ incomingEdges?: RddOperationEdge[];
+ outgoingEdges?: RddOperationEdge[];
+ rootCluster?: RddOperationCluster;
+ stageId?: string;
+ }
+ interface RddOperationNode {
+ barrier?: boolean;
+ cached?: boolean;
+ callsite?: string;
+ name?: string;
+ nodeId?: number;
+ outputDeterministicLevel?: string;
+ }
+ interface RddPartitionInfo {
+ blockName?: string;
+ diskUsed?: string;
+ executors?: string[];
+ memoryUsed?: string;
+ storageLevel?: string;
+ }
+ interface RddStorageInfo {
+ dataDistribution?: RddDataDistribution[];
+ diskUsed?: string;
+ memoryUsed?: string;
+ name?: string;
+ numCachedPartitions?: number;
+ numPartitions?: number;
+ partitions?: RddPartitionInfo[];
+ rddStorageId?: number;
+ storageLevel?: string;
+ }
  interface RegexValidation {
  /** Required. RE2 regular expressions used to validate the parameter's value. The value must match the regex in its entirety (substring matches are not sufficient). */
  regexes?: string[];
  }
  interface RepairClusterRequest {
+ /** Optional. Cluster to be repaired */
+ cluster?: ClusterToRepair;
  /** Optional. Specifying the cluster_uuid means the RPC will fail (with error NOT_FOUND) if a cluster with the specified UUID does not exist. */
  clusterUuid?: string;
  /** Optional. Timeout for graceful YARN decommissioning. Graceful decommissioning facilitates the removal of cluster nodes without interrupting jobs in progress. The timeout specifies the amount of time to wait for jobs finish before forcefully removing nodes. The default timeout is 0 for forceful decommissioning, and the maximum timeout period is 1 day. (see JSON Mapping—Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)).graceful_decommission_timeout is supported in Dataproc image versions 1.2+. */
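
Editor's note: a minimal sketch of a RepairClusterRequest that uses the new optional `cluster` field. The action string is a placeholder; the allowed ClusterRepairAction values are not listed in this file, so consult the Dataproc documentation:

    const repairRequest: RepairClusterRequest = {
      // placeholder; substitute a real ClusterRepairAction value
      cluster: {clusterRepairAction: 'REPAIR_ACTION_UNSPECIFIED'},
      // optional: the RPC fails with NOT_FOUND unless this matches the live cluster
      clusterUuid: 'my-cluster-uuid',
    };
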
@@ -1075,6 +1422,15 @@
  /** Required. The number of running instances for the node group to maintain. The group adds or removes instances to maintain the number of instances specified by this parameter. */
  size?: number;
  }
+ interface ResourceInformation {
+ addresses?: string[];
+ name?: string;
+ }
+ interface ResourceProfileInfo {
+ executorResources?: {[P in string]: ExecutorResourceRequest};
+ resourceProfileId?: number;
+ taskResources?: {[P in string]: TaskResourceRequest};
+ }
  interface RuntimeConfig {
  /** Optional. Autotuning configuration of the workload. */
  autotuningConfig?: AutotuningConfig;
@@ -1101,6 +1457,102 @@
  /** Output only. A URI pointing to the location of the stdout and stderr of the workload. */
  outputUri?: string;
  }
+ interface SearchSessionSparkApplicationExecutorsResponse {
+ /** This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSessionSparkApplicationExecutorsRequest. */
+ nextPageToken?: string;
+ /** Details about executors used by the application. */
+ sparkApplicationExecutors?: ExecutorSummary[];
+ }
+ interface SearchSessionSparkApplicationExecutorStageSummaryResponse {
+ /** This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSessionSparkApplicationExecutorStageSummaryRequest. */
+ nextPageToken?: string;
+ /** Details about executors used by the application stage. */
+ sparkApplicationStageExecutors?: ExecutorStageSummary[];
+ }
+ interface SearchSessionSparkApplicationJobsResponse {
+ /** This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSessionSparkApplicationJobsRequest. */
+ nextPageToken?: string;
+ /** Output only. Data corresponding to a spark job. */
+ sparkApplicationJobs?: JobData[];
+ }
+ interface SearchSessionSparkApplicationSqlQueriesResponse {
+ /** This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSessionSparkApplicationSqlQueriesRequest. */
+ nextPageToken?: string;
+ /** Output only. SQL Execution Data */
+ sparkApplicationSqlQueries?: SqlExecutionUiData[];
+ }
+ interface SearchSessionSparkApplicationsResponse {
+ /** This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSessionSparkApplicationsRequest. */
+ nextPageToken?: string;
+ /** Output only. High level information corresponding to an application. */
+ sparkApplications?: SparkApplication[];
+ }
+ interface SearchSessionSparkApplicationStageAttemptsResponse {
+ /** This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSessionSparkApplicationStageAttemptsRequest. */
+ nextPageToken?: string;
+ /** Output only. Data corresponding to a stage attempts */
+ sparkApplicationStageAttempts?: StageData[];
+ }
+ interface SearchSessionSparkApplicationStageAttemptTasksResponse {
+ /** This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSessionSparkApplicationStageAttemptTasksRequest. */
+ nextPageToken?: string;
+ /** Output only. Data corresponding to tasks created by spark. */
+ sparkApplicationStageAttemptTasks?: TaskData[];
+ }
+ interface SearchSessionSparkApplicationStagesResponse {
+ /** This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSessionSparkApplicationStages. */
+ nextPageToken?: string;
+ /** Output only. Data corresponding to a stage. */
+ sparkApplicationStages?: StageData[];
+ }
+ interface SearchSparkApplicationExecutorsResponse {
+ /** This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSparkApplicationExecutorsListRequest. */
+ nextPageToken?: string;
+ /** Details about executors used by the application. */
+ sparkApplicationExecutors?: ExecutorSummary[];
+ }
+ interface SearchSparkApplicationExecutorStageSummaryResponse {
+ /** This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSparkApplicationExecutorsListRequest. */
+ nextPageToken?: string;
+ /** Details about executors used by the application stage. */
+ sparkApplicationStageExecutors?: ExecutorStageSummary[];
+ }
+ interface SearchSparkApplicationJobsResponse {
+ /** This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSparkApplicationJobsRequest. */
+ nextPageToken?: string;
+ /** Output only. Data corresponding to a spark job. */
+ sparkApplicationJobs?: JobData[];
+ }
+ interface SearchSparkApplicationSqlQueriesResponse {
+ /** This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSparkApplicationSqlQueriesRequest. */
+ nextPageToken?: string;
+ /** Output only. SQL Execution Data */
+ sparkApplicationSqlQueries?: SqlExecutionUiData[];
+ }
+ interface SearchSparkApplicationsResponse {
+ /** This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSparkApplicationsRequest. */
+ nextPageToken?: string;
+ /** Output only. High level information corresponding to an application. */
+ sparkApplications?: SparkApplication[];
+ }
+ interface SearchSparkApplicationStageAttemptsResponse {
+ /** This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent ListSparkApplicationStageAttemptsRequest. */
+ nextPageToken?: string;
+ /** Output only. Data corresponding to a stage attempts */
+ sparkApplicationStageAttempts?: StageData[];
+ }
+ interface SearchSparkApplicationStageAttemptTasksResponse {
+ /** This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent ListSparkApplicationStageAttemptTasksRequest. */
+ nextPageToken?: string;
+ /** Output only. Data corresponding to tasks created by spark. */
+ sparkApplicationStageAttemptTasks?: TaskData[];
+ }
+ interface SearchSparkApplicationStagesResponse {
+ /** This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSparkApplicationStages. */
+ nextPageToken?: string;
+ /** Output only. Data corresponding to a stage. */
+ sparkApplicationStages?: StageData[];
+ }
  interface SecurityConfig {
  /** Optional. Identity related configuration, including service account based secure multi-tenancy user mappings. */
  identityConfig?: IdentityConfig;
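
Editor's note: every Search*Response above shares the same cursor contract — pass the returned nextPageToken back as the page_token until it is absent. A minimal paging sketch over one of them; the fetchPage parameter stands in for whichever search call you wrap and is not itself an API in this file:

    async function collectSparkApplications(
      fetchPage: (pageToken?: string) => Promise<SearchSparkApplicationsResponse>
    ): Promise<SparkApplication[]> {
      const all: SparkApplication[] = [];
      let pageToken: string | undefined;
      do {
        const page = await fetchPage(pageToken);
        all.push(...(page.sparkApplications ?? []));
        pageToken = page.nextPageToken; // undefined on the last page
      } while (pageToken);
      return all;
    }
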
@@ -1203,6 +1655,66 @@
  /** Optional. Defines whether instances have the vTPM enabled. */
  enableVtpm?: boolean;
  }
+ interface ShufflePushReadMetrics {
+ corruptMergedBlockChunks?: string;
+ localMergedBlocksFetched?: string;
+ localMergedBytesRead?: string;
+ localMergedChunksFetched?: string;
+ mergedFetchFallbackCount?: string;
+ remoteMergedBlocksFetched?: string;
+ remoteMergedBytesRead?: string;
+ remoteMergedChunksFetched?: string;
+ remoteMergedReqsDuration?: string;
+ }
+ interface ShufflePushReadQuantileMetrics {
+ corruptMergedBlockChunks?: Quantiles;
+ localMergedBlocksFetched?: Quantiles;
+ localMergedBytesRead?: Quantiles;
+ localMergedChunksFetched?: Quantiles;
+ mergedFetchFallbackCount?: Quantiles;
+ remoteMergedBlocksFetched?: Quantiles;
+ remoteMergedBytesRead?: Quantiles;
+ remoteMergedChunksFetched?: Quantiles;
+ remoteMergedReqsDuration?: Quantiles;
+ }
+ interface ShuffleReadMetrics {
+ fetchWaitTimeMillis?: string;
+ localBlocksFetched?: string;
+ localBytesRead?: string;
+ recordsRead?: string;
+ remoteBlocksFetched?: string;
+ remoteBytesRead?: string;
+ remoteBytesReadToDisk?: string;
+ remoteReqsDuration?: string;
+ shufflePushReadMetrics?: ShufflePushReadMetrics;
+ }
+ interface ShuffleReadQuantileMetrics {
+ fetchWaitTimeMillis?: Quantiles;
+ localBlocksFetched?: Quantiles;
+ readBytes?: Quantiles;
+ readRecords?: Quantiles;
+ remoteBlocksFetched?: Quantiles;
+ remoteBytesRead?: Quantiles;
+ remoteBytesReadToDisk?: Quantiles;
+ remoteReqsDuration?: Quantiles;
+ shufflePushReadMetrics?: ShufflePushReadQuantileMetrics;
+ totalBlocksFetched?: Quantiles;
+ }
+ interface ShuffleWriteMetrics {
+ bytesWritten?: string;
+ recordsWritten?: string;
+ writeTimeNanos?: string;
+ }
+ interface ShuffleWriteQuantileMetrics {
+ writeBytes?: Quantiles;
+ writeRecords?: Quantiles;
+ writeTimeNanos?: Quantiles;
+ }
+ interface SinkProgress {
+ description?: string;
+ metrics?: {[P in string]: string};
+ numOutputRows?: string;
+ }
  interface SoftwareConfig {
  /** Optional. The version of software inside the cluster. It must be one of the supported Dataproc Versions (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported-dataproc-image-versions), such as "1.2" (including a subminor version, such as "1.2.29"), or the "preview" version (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). If unspecified, it defaults to the latest Debian version. */
  imageVersion?: string;
@@ -1211,6 +1723,22 @@
  /** Optional. The properties to set on daemon config files.Property keys are specified in prefix:property format, for example core:hadoop.tmp.dir. The following are supported prefixes and their mappings: capacity-scheduler: capacity-scheduler.xml core: core-site.xml distcp: distcp-default.xml hdfs: hdfs-site.xml hive: hive-site.xml mapred: mapred-site.xml pig: pig.properties spark: spark-defaults.conf yarn: yarn-site.xmlFor more information, see Cluster properties (https://cloud.google.com/dataproc/docs/concepts/cluster-properties). */
  properties?: {[P in string]: string};
  }
+ interface SourceProgress {
+ description?: string;
+ endOffset?: string;
+ inputRowsPerSecond?: number;
+ latestOffset?: string;
+ metrics?: {[P in string]: string};
+ numInputRows?: string;
+ processedRowsPerSecond?: number;
+ startOffset?: string;
+ }
+ interface SparkApplication {
+ /** Output only. High level information corresponding to an application. */
+ application?: ApplicationInfo;
+ /** Identifier. Name of the spark application */
+ name?: string;
+ }
  interface SparkBatch {
  /** Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. */
  archiveUris?: string[];
@@ -1248,6 +1776,32 @@
  /** Optional. A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code. */
  properties?: {[P in string]: string};
  }
+ interface SparkPlanGraph {
+ edges?: SparkPlanGraphEdge[];
+ executionId?: string;
+ nodes?: SparkPlanGraphNodeWrapper[];
+ }
+ interface SparkPlanGraphCluster {
+ desc?: string;
+ metrics?: SqlPlanMetric[];
+ name?: string;
+ nodes?: SparkPlanGraphNodeWrapper[];
+ sparkPlanGraphClusterId?: string;
+ }
+ interface SparkPlanGraphEdge {
+ fromId?: string;
+ toId?: string;
+ }
+ interface SparkPlanGraphNode {
+ desc?: string;
+ metrics?: SqlPlanMetric[];
+ name?: string;
+ sparkPlanGraphNodeId?: string;
+ }
+ interface SparkPlanGraphNodeWrapper {
+ cluster?: SparkPlanGraphCluster;
+ node?: SparkPlanGraphNode;
+ }
  interface SparkRBatch {
  /** Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. */
  archiveUris?: string[];
@@ -1272,6 +1826,11 @@
  /** Optional. A mapping of property names to values, used to configure SparkR. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code. */
  properties?: {[P in string]: string};
  }
+ interface SparkRuntimeInfo {
+ javaHome?: string;
+ javaVersion?: string;
+ scalaVersion?: string;
+ }
  interface SparkSqlBatch {
  /** Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH. */
  jarFileUris?: string[];
@@ -1308,6 +1867,168 @@
  /** Optional. Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change.Bounds: 0.0, 1.0. Default: 0.0. */
  scaleUpMinWorkerFraction?: number;
  }
+ interface SparkWrapperObject {
+ applicationEnvironmentInfo?: ApplicationEnvironmentInfo;
+ /** Application Id created by Spark. */
+ applicationId?: string;
+ applicationInfo?: ApplicationInfo;
+ appSummary?: AppSummary;
+ /** VM Timestamp associated with the data object. */
+ eventTimestamp?: string;
+ executorStageSummary?: ExecutorStageSummary;
+ executorSummary?: ExecutorSummary;
+ jobData?: JobData;
+ poolData?: PoolData;
+ processSummary?: ProcessSummary;
+ rddOperationGraph?: RddOperationGraph;
+ rddStorageInfo?: RddStorageInfo;
+ resourceProfileInfo?: ResourceProfileInfo;
+ sparkPlanGraph?: SparkPlanGraph;
+ speculationStageSummary?: SpeculationStageSummary;
+ sqlExecutionUiData?: SqlExecutionUiData;
+ stageData?: StageData;
+ streamBlockData?: StreamBlockData;
+ streamingQueryData?: StreamingQueryData;
+ streamingQueryProgress?: StreamingQueryProgress;
+ taskData?: TaskData;
+ }
+ interface SpeculationStageSummary {
+ numActiveTasks?: number;
+ numCompletedTasks?: number;
+ numFailedTasks?: number;
+ numKilledTasks?: number;
+ numTasks?: number;
+ stageAttemptId?: number;
+ stageId?: string;
+ }
+ interface SqlExecutionUiData {
+ completionTime?: string;
+ description?: string;
+ details?: string;
+ errorMessage?: string;
+ executionId?: string;
+ jobs?: {[P in string]: string};
+ metrics?: SqlPlanMetric[];
+ metricValues?: {[P in string]: string};
+ metricValuesIsNull?: boolean;
+ modifiedConfigs?: {[P in string]: string};
+ physicalPlanDescription?: string;
+ rootExecutionId?: string;
+ stages?: string[];
+ submissionTime?: string;
+ }
+ interface SqlPlanMetric {
+ accumulatorId?: string;
+ metricType?: string;
+ name?: string;
+ }
+ interface StageAttemptTasksSummary {
+ applicationId?: string;
+ numFailedTasks?: number;
+ numKilledTasks?: number;
+ numPendingTasks?: number;
+ numRunningTasks?: number;
+ numSuccessTasks?: number;
+ numTasks?: number;
+ stageAttemptId?: number;
+ stageId?: string;
+ }
+ interface StageData {
+ accumulatorUpdates?: AccumulableInfo[];
+ completionTime?: string;
+ description?: string;
+ details?: string;
+ executorMetricsDistributions?: ExecutorMetricsDistributions;
+ executorSummary?: {[P in string]: ExecutorStageSummary};
+ failureReason?: string;
+ firstTaskLaunchedTime?: string;
+ isShufflePushEnabled?: boolean;
+ jobIds?: string[];
+ killedTasksSummary?: {[P in string]: number};
+ locality?: {[P in string]: string};
+ name?: string;
+ numActiveTasks?: number;
+ numCompletedIndices?: number;
+ numCompleteTasks?: number;
+ numFailedTasks?: number;
+ numKilledTasks?: number;
+ numTasks?: number;
+ parentStageIds?: string[];
+ peakExecutorMetrics?: ExecutorMetrics;
+ rddIds?: string[];
+ resourceProfileId?: number;
+ schedulingPool?: string;
+ shuffleMergersCount?: number;
+ speculationSummary?: SpeculationStageSummary;
+ stageAttemptId?: number;
+ stageId?: string;
+ stageMetrics?: StageMetrics;
+ status?: string;
+ submissionTime?: string;
+ /** Summary metrics fields. These are included in response only if present in summary_metrics_mask field in request */
+ taskQuantileMetrics?: TaskQuantileMetrics;
+ tasks?: {[P in string]: TaskData};
+ }
+ interface StageInputMetrics {
+ bytesRead?: string;
+ recordsRead?: string;
+ }
+ interface StageMetrics {
+ diskBytesSpilled?: string;
+ executorCpuTimeNanos?: string;
+ executorDeserializeCpuTimeNanos?: string;
+ executorDeserializeTimeMillis?: string;
+ executorRunTimeMillis?: string;
+ jvmGcTimeMillis?: string;
+ memoryBytesSpilled?: string;
+ peakExecutionMemoryBytes?: string;
+ resultSerializationTimeMillis?: string;
+ resultSize?: string;
+ stageInputMetrics?: StageInputMetrics;
+ stageOutputMetrics?: StageOutputMetrics;
+ stageShuffleReadMetrics?: StageShuffleReadMetrics;
+ stageShuffleWriteMetrics?: StageShuffleWriteMetrics;
+ }
+ interface StageOutputMetrics {
+ bytesWritten?: string;
+ recordsWritten?: string;
+ }
+ interface StageShufflePushReadMetrics {
+ corruptMergedBlockChunks?: string;
+ localMergedBlocksFetched?: string;
+ localMergedBytesRead?: string;
+ localMergedChunksFetched?: string;
+ mergedFetchFallbackCount?: string;
+ remoteMergedBlocksFetched?: string;
+ remoteMergedBytesRead?: string;
+ remoteMergedChunksFetched?: string;
+ remoteMergedReqsDuration?: string;
+ }
+ interface StageShuffleReadMetrics {
+ bytesRead?: string;
+ fetchWaitTimeMillis?: string;
+ localBlocksFetched?: string;
+ localBytesRead?: string;
+ recordsRead?: string;
+ remoteBlocksFetched?: string;
+ remoteBytesRead?: string;
+ remoteBytesReadToDisk?: string;
+ remoteReqsDuration?: string;
+ stageShufflePushReadMetrics?: StageShufflePushReadMetrics;
+ }
+ interface StageShuffleWriteMetrics {
+ bytesWritten?: string;
+ recordsWritten?: string;
+ writeTimeNanos?: string;
+ }
+ interface StagesSummary {
+ applicationId?: string;
+ numActiveStages?: number;
+ numCompletedStages?: number;
+ numFailedStages?: number;
+ numPendingStages?: number;
+ numSkippedStages?: number;
+ }
  interface StartClusterRequest {
  /** Optional. Specifying the cluster_uuid means the RPC will fail (with error NOT_FOUND) if a cluster with the specified UUID does not exist. */
  clusterUuid?: string;
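
Editor's note: SparkWrapperObject reads as an envelope that pairs applicationId and eventTimestamp with one of the payload fields (jobData, stageData, taskData, and so on) per event; the one-payload-at-a-time reading is our inference from the shape, since the file does not state it. A sketch of one wrapped event, with all values illustrative:

    const wrapped: SparkWrapperObject = {
      applicationId: 'app-20240928-0001',   // hypothetical Spark application id
      eventTimestamp: '2024-09-28T12:00:00Z',
      jobData: {jobId: '0', name: 'collect at App.scala:42', status: 'RUNNING'},
    };
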
@@ -1326,6 +2047,20 @@
  /** Output only. The time when the batch entered the historical state. */
  stateStartTime?: string;
  }
+ interface StateOperatorProgress {
+ allRemovalsTimeMs?: string;
+ allUpdatesTimeMs?: string;
+ commitTimeMs?: string;
+ customMetrics?: {[P in string]: string};
+ memoryUsedBytes?: string;
+ numRowsDroppedByWatermark?: string;
+ numRowsRemoved?: string;
+ numRowsTotal?: string;
+ numRowsUpdated?: string;
+ numShufflePartitions?: string;
+ numStateStoreInstances?: string;
+ operatorName?: string;
+ }
  interface Status {
  /** The status code, which should be an enum value of google.rpc.Code. */
  code?: number;
@@ -1340,12 +2075,152 @@
  /** Optional. A unique ID used to identify the request. If the server receives two StopClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StopClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters. */
  requestId?: string;
  }
+ interface StreamBlockData {
+ deserialized?: boolean;
+ diskSize?: string;
+ executorId?: string;
+ hostPort?: string;
+ memSize?: string;
+ name?: string;
+ storageLevel?: string;
+ useDisk?: boolean;
+ useMemory?: boolean;
+ }
+ interface StreamingQueryData {
+ endTimestamp?: string;
+ exception?: string;
+ isActive?: boolean;
+ name?: string;
+ runId?: string;
+ startTimestamp?: string;
+ streamingQueryId?: string;
+ }
+ interface StreamingQueryProgress {
+ batchDuration?: string;
+ batchId?: string;
+ durationMillis?: {[P in string]: string};
+ eventTime?: {[P in string]: string};
+ name?: string;
+ observedMetrics?: {[P in string]: string};
+ runId?: string;
+ sink?: SinkProgress;
+ sources?: SourceProgress[];
+ stateOperators?: StateOperatorProgress[];
+ streamingQueryProgressId?: string;
+ timestamp?: string;
+ }
  interface SubmitJobRequest {
  /** Required. The job resource. */
  job?: Job;
  /** Optional. A unique id used to identify the request. If the server receives two SubmitJobRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.SubmitJobRequest)s with the same id, then the second request will be ignored and the first Job created and stored in the backend is returned.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters. */
  requestId?: string;
  }
+ interface SummarizeSessionSparkApplicationExecutorsResponse {
+ /** Consolidated summary for active executors. */
+ activeExecutorSummary?: ConsolidatedExecutorSummary;
+ /** Spark Application Id */
+ applicationId?: string;
+ /** Consolidated summary for dead executors. */
+ deadExecutorSummary?: ConsolidatedExecutorSummary;
+ /** Overall consolidated summary for all executors. */
+ totalExecutorSummary?: ConsolidatedExecutorSummary;
+ }
+ interface SummarizeSessionSparkApplicationJobsResponse {
+ /** Summary of a Spark Application Jobs */
+ jobsSummary?: JobsSummary;
+ }
+ interface SummarizeSessionSparkApplicationStageAttemptTasksResponse {
+ /** Summary of tasks for a Spark Application Stage Attempt */
+ stageAttemptTasksSummary?: StageAttemptTasksSummary;
+ }
+ interface SummarizeSessionSparkApplicationStagesResponse {
+ /** Summary of a Spark Application Stages */
+ stagesSummary?: StagesSummary;
+ }
+ interface SummarizeSparkApplicationExecutorsResponse {
+ /** Consolidated summary for active executors. */
+ activeExecutorSummary?: ConsolidatedExecutorSummary;
+ /** Spark Application Id */
+ applicationId?: string;
+ /** Consolidated summary for dead executors. */
+ deadExecutorSummary?: ConsolidatedExecutorSummary;
+ /** Overall consolidated summary for all executors. */
+ totalExecutorSummary?: ConsolidatedExecutorSummary;
+ }
+ interface SummarizeSparkApplicationJobsResponse {
+ /** Summary of a Spark Application Jobs */
+ jobsSummary?: JobsSummary;
+ }
+ interface SummarizeSparkApplicationStageAttemptTasksResponse {
+ /** Summary of tasks for a Spark Application Stage Attempt */
+ stageAttemptTasksSummary?: StageAttemptTasksSummary;
+ }
+ interface SummarizeSparkApplicationStagesResponse {
+ /** Summary of a Spark Application Stages */
+ stagesSummary?: StagesSummary;
+ }
+ interface TaskData {
+ accumulatorUpdates?: AccumulableInfo[];
+ attempt?: number;
+ durationMillis?: string;
+ errorMessage?: string;
+ executorId?: string;
+ executorLogs?: {[P in string]: string};
+ gettingResultTimeMillis?: string;
+ hasMetrics?: boolean;
+ host?: string;
+ index?: number;
+ launchTime?: string;
+ partitionId?: number;
+ resultFetchStart?: string;
+ schedulerDelayMillis?: string;
+ speculative?: boolean;
+ stageAttemptId?: number;
+ stageId?: string;
+ status?: string;
+ taskId?: string;
+ taskLocality?: string;
+ taskMetrics?: TaskMetrics;
+ }
+ interface TaskMetrics {
+ diskBytesSpilled?: string;
+ executorCpuTimeNanos?: string;
+ executorDeserializeCpuTimeNanos?: string;
+ executorDeserializeTimeMillis?: string;
+ executorRunTimeMillis?: string;
+ inputMetrics?: InputMetrics;
+ jvmGcTimeMillis?: string;
+ memoryBytesSpilled?: string;
+ outputMetrics?: OutputMetrics;
+ peakExecutionMemoryBytes?: string;
+ resultSerializationTimeMillis?: string;
+ resultSize?: string;
+ shuffleReadMetrics?: ShuffleReadMetrics;
+ shuffleWriteMetrics?: ShuffleWriteMetrics;
+ }
+ interface TaskQuantileMetrics {
+ diskBytesSpilled?: Quantiles;
+ durationMillis?: Quantiles;
+ executorCpuTimeNanos?: Quantiles;
+ executorDeserializeCpuTimeNanos?: Quantiles;
+ executorDeserializeTimeMillis?: Quantiles;
+ executorRunTimeMillis?: Quantiles;
+ gettingResultTimeMillis?: Quantiles;
+ inputMetrics?: InputQuantileMetrics;
+ jvmGcTimeMillis?: Quantiles;
+ memoryBytesSpilled?: Quantiles;
+ outputMetrics?: OutputQuantileMetrics;
+ peakExecutionMemoryBytes?: Quantiles;
+ resultSerializationTimeMillis?: Quantiles;
+ resultSize?: Quantiles;
+ schedulerDelayMillis?: Quantiles;
+ shuffleReadMetrics?: ShuffleReadQuantileMetrics;
+ shuffleWriteMetrics?: ShuffleWriteQuantileMetrics;
+ }
+ interface TaskResourceRequest {
+ amount?: number;
+ resourceName?: string;
+ }
  interface TemplateParameter {
  /** Optional. Brief description of the parameter. Must not exceed 1024 characters. */
  description?: string;
@@ -1497,6 +2372,19 @@
  /** A cluster that is managed by the workflow. */
  managedCluster?: ManagedCluster;
  }
+ interface WriteSessionSparkApplicationContextRequest {
+ /** Required. Parent (Batch) resource reference. */
+ parent?: string;
+ /** Required. The batch of spark application context objects sent for ingestion. */
+ sparkWrapperObjects?: SparkWrapperObject[];
+ }
+ interface WriteSessionSparkApplicationContextResponse {}
+ interface WriteSparkApplicationContextRequest {
+ /** Required. Parent (Batch) resource reference. */
+ parent?: string;
+ sparkWrapperObjects?: SparkWrapperObject[];
+ }
+ interface WriteSparkApplicationContextResponse {}
  interface YarnApplication {
  /** Required. The application name. */
  name?: string;
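
Editor's note: the Write*SparkApplicationContextRequest messages above are the ingestion carriers for SparkWrapperObject. A sketch of a request that sends the wrapped event built in the earlier SparkWrapperObject example; the parent value is a made-up resource name:

    const writeRequest: WriteSparkApplicationContextRequest = {
      parent: 'projects/my-project/locations/us-central1/batches/my-batch', // hypothetical
      sparkWrapperObjects: [wrapped], // the SparkWrapperObject from the earlier sketch
    };
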
@@ -1800,9 +2688,9 @@
  body: AutoscalingPolicy
  ): Request<AutoscalingPolicy>;
  }
- interface BatchesResource {
- /** Analyze a Batch for possible recommendations and insights. */
- analyze(request: {
+ interface SparkApplicationsResource {
+ /** Obtain high level information corresponding to a single Spark Application. */
+ access(request?: {
  /** V1 error format. */
  '$.xgafv'?: string;
  /** OAuth access token. */
@@ -1815,10 +2703,12 @@
  fields?: string;
  /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
  key?: string;
- /** Required. The fully qualified name of the batch to analyze in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID" */
+ /** Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" */
  name: string;
  /** OAuth 2.0 token for the current user. */
  oauth_token?: string;
+ /** Required. Parent (Batch) resource reference. */
+ parent?: string;
  /** Returns response with indentations and line breaks. */
  prettyPrint?: boolean;
  /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
@@ -1827,106 +2717,38 @@ declare namespace gapi.client {
1827
2717
  upload_protocol?: string;
1828
2718
  /** Legacy upload protocol for media (e.g. "media", "multipart"). */
1829
2719
  uploadType?: string;
1830
- /** Request body */
1831
- resource: AnalyzeBatchRequest;
1832
- }): Request<Operation>;
1833
- analyze(
1834
- request: {
1835
- /** V1 error format. */
1836
- '$.xgafv'?: string;
1837
- /** OAuth access token. */
1838
- access_token?: string;
1839
- /** Data format for response. */
1840
- alt?: string;
1841
- /** JSONP */
1842
- callback?: string;
1843
- /** Selector specifying which fields to include in a partial response. */
1844
- fields?: string;
1845
- /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
1846
- key?: string;
1847
- /** Required. The fully qualified name of the batch to analyze in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID" */
1848
- name: string;
1849
- /** OAuth 2.0 token for the current user. */
1850
- oauth_token?: string;
1851
- /** Returns response with indentations and line breaks. */
1852
- prettyPrint?: boolean;
1853
- /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
1854
- quotaUser?: string;
1855
- /** Upload protocol for media (e.g. "raw", "multipart"). */
1856
- upload_protocol?: string;
1857
- /** Legacy upload protocol for media (e.g. "media", "multipart"). */
1858
- uploadType?: string;
1859
- },
1860
- body: AnalyzeBatchRequest
1861
- ): Request<Operation>;
1862
- /** Creates a batch workload that executes asynchronously. */
1863
- create(request: {
2720
+ }): Request<AccessSparkApplicationResponse>;
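A minimal call sketch, assuming the new resource is wired up at gapi.client.dataproc.projects.locations.batches.sparkApplications as is conventional for these typings (the resource wiring itself sits outside this hunk, and all IDs are placeholders):

  gapi.client.dataproc.projects.locations.batches.sparkApplications
    .access({
      // Note the sparkApplications-qualified 'name'.
      name: 'projects/my-project/locations/us-central1/batches/my-batch/sparkApplications/app-1',
    })
    .then((resp) => console.log(resp.result.application));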
+ /** Obtain environment details for a Spark Application */
+ accessEnvironmentInfo(request?: {
  /** V1 error format. */
  '$.xgafv'?: string;
  /** OAuth access token. */
  access_token?: string;
  /** Data format for response. */
  alt?: string;
- /** Optional. The ID to use for the batch, which will become the final component of the batch's resource name.This value must be 4-63 characters. Valid characters are /[a-z][0-9]-/. */
- batchId?: string;
  /** JSONP */
  callback?: string;
  /** Selector specifying which fields to include in a partial response. */
  fields?: string;
  /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
  key?: string;
+ /** Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" */
+ name: string;
  /** OAuth 2.0 token for the current user. */
  oauth_token?: string;
- /** Required. The parent resource where this batch will be created. */
- parent: string;
+ /** Required. Parent (Batch) resource reference. */
+ parent?: string;
  /** Returns response with indentations and line breaks. */
  prettyPrint?: boolean;
  /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
  quotaUser?: string;
- /** Optional. A unique ID used to identify the request. If the service receives two CreateBatchRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateBatchRequest)s with the same request_id, the second request is ignored and the Operation that corresponds to the first Batch created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The value must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters. */
- requestId?: string;
  /** Upload protocol for media (e.g. "raw", "multipart"). */
  upload_protocol?: string;
  /** Legacy upload protocol for media (e.g. "media", "multipart"). */
  uploadType?: string;
- /** Request body */
- resource: Batch;
- }): Request<Operation>;
- create(
- request: {
- /** V1 error format. */
- '$.xgafv'?: string;
- /** OAuth access token. */
- access_token?: string;
- /** Data format for response. */
- alt?: string;
- /** Optional. The ID to use for the batch, which will become the final component of the batch's resource name.This value must be 4-63 characters. Valid characters are /[a-z][0-9]-/. */
- batchId?: string;
- /** JSONP */
- callback?: string;
- /** Selector specifying which fields to include in a partial response. */
- fields?: string;
- /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
- key?: string;
- /** OAuth 2.0 token for the current user. */
- oauth_token?: string;
- /** Required. The parent resource where this batch will be created. */
- parent: string;
- /** Returns response with indentations and line breaks. */
- prettyPrint?: boolean;
- /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
- quotaUser?: string;
- /** Optional. A unique ID used to identify the request. If the service receives two CreateBatchRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateBatchRequest)s with the same request_id, the second request is ignored and the Operation that corresponds to the first Batch created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The value must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters. */
- requestId?: string;
- /** Upload protocol for media (e.g. "raw", "multipart"). */
- upload_protocol?: string;
- /** Legacy upload protocol for media (e.g. "media", "multipart"). */
- uploadType?: string;
- },
- body: Batch
- ): Request<Operation>;
- /** Deletes the batch workload resource. If the batch is not in a CANCELLED, SUCCEEDED or FAILED State, the delete operation fails and the response returns FAILED_PRECONDITION. */
- delete(request?: {
+ }): Request<AccessSparkApplicationEnvironmentInfoResponse>;
+ /** Obtain data corresponding to a spark job for a Spark Application. */
+ accessJob(request?: {
  /** V1 error format. */
  '$.xgafv'?: string;
  /** OAuth access token. */
@@ -1937,12 +2759,16 @@ declare namespace gapi.client {
  callback?: string;
  /** Selector specifying which fields to include in a partial response. */
  fields?: string;
+ /** Required. Job ID to fetch data for. */
+ jobId?: string;
  /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
  key?: string;
- /** Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID" */
+ /** Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" */
  name: string;
  /** OAuth 2.0 token for the current user. */
  oauth_token?: string;
+ /** Required. Parent (Batch) resource reference. */
+ parent?: string;
  /** Returns response with indentations and line breaks. */
  prettyPrint?: boolean;
  /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
@@ -1951,9 +2777,9 @@ declare namespace gapi.client {
  upload_protocol?: string;
  /** Legacy upload protocol for media (e.g. "media", "multipart"). */
  uploadType?: string;
- }): Request<{}>;
- /** Gets the batch workload resource representation. */
- get(request?: {
+ }): Request<AccessSparkApplicationJobResponse>;
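Job-level lookups add a jobId query parameter on top of the same sparkApplications-qualified name. A hedged sketch under the same resource-path assumption as above:

  gapi.client.dataproc.projects.locations.batches.sparkApplications
    .accessJob({
      name: 'projects/my-project/locations/us-central1/batches/my-batch/sparkApplications/app-1',
      jobId: '0', // Spark job ID within the application (illustrative)
    })
    .then((resp) => console.log(resp.result.jobData));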
+ /** Obtain Spark Plan Graph for a Spark Application SQL execution. Limits the number of clusters returned as part of the graph to 10000. */
+ accessSqlPlan(request?: {
  /** V1 error format. */
  '$.xgafv'?: string;
  /** OAuth access token. */
@@ -1962,14 +2788,18 @@ declare namespace gapi.client {
  alt?: string;
  /** JSONP */
  callback?: string;
+ /** Required. Execution ID */
+ executionId?: string;
  /** Selector specifying which fields to include in a partial response. */
  fields?: string;
  /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
  key?: string;
- /** Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID" */
+ /** Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" */
  name: string;
  /** OAuth 2.0 token for the current user. */
  oauth_token?: string;
+ /** Required. Parent (Batch) resource reference. */
+ parent?: string;
  /** Returns response with indentations and line breaks. */
  prettyPrint?: boolean;
  /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
@@ -1978,9 +2808,9 @@ declare namespace gapi.client {
  upload_protocol?: string;
  /** Legacy upload protocol for media (e.g. "media", "multipart"). */
  uploadType?: string;
- }): Request<Batch>;
- /** Lists batch workloads. */
- list(request?: {
+ }): Request<AccessSparkApplicationSqlSparkPlanGraphResponse>;
+ /** Obtain data corresponding to a particular SQL Query for a Spark Application. */
+ accessSqlQuery(request?: {
  /** V1 error format. */
  '$.xgafv'?: string;
  /** OAuth access token. */
@@ -1989,22 +2819,22 @@ declare namespace gapi.client {
  alt?: string;
  /** JSONP */
  callback?: string;
+ /** Optional. Lists/ hides details of Spark plan nodes. True is set to list and false to hide. */
+ details?: boolean;
+ /** Required. Execution ID */
+ executionId?: string;
  /** Selector specifying which fields to include in a partial response. */
  fields?: string;
- /** Optional. A filter for the batches to return in the response.A filter is a logical expression constraining the values of various fields in each batch resource. Filters are case sensitive, and may contain multiple clauses combined with logical operators (AND/OR). Supported fields are batch_id, batch_uuid, state, create_time, and labels.e.g. state = RUNNING and create_time < "2023-01-01T00:00:00Z" filters for batches in state RUNNING that were created before 2023-01-01. state = RUNNING and labels.environment=production filters for batches in state in a RUNNING state that have a production environment label.See https://google.aip.dev/assets/misc/ebnf-filtering.txt for a detailed description of the filter syntax and a list of supported comparisons. */
- filter?: string;
  /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
  key?: string;
+ /** Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" */
+ name: string;
  /** OAuth 2.0 token for the current user. */
  oauth_token?: string;
- /** Optional. Field(s) on which to sort the list of batches.Currently the only supported sort orders are unspecified (empty) and create_time desc to sort by most recently created batches first.See https://google.aip.dev/132#ordering for more details. */
- orderBy?: string;
- /** Optional. The maximum number of batches to return in each response. The service may return fewer than this value. The default page size is 20; the maximum page size is 1000. */
- pageSize?: number;
- /** Optional. A page token received from a previous ListBatches call. Provide this token to retrieve the subsequent page. */
- pageToken?: string;
- /** Required. The parent, which owns this collection of batches. */
- parent: string;
+ /** Required. Parent (Batch) resource reference. */
+ parent?: string;
+ /** Optional. Enables/ disables physical plan description on demand */
+ planDescription?: boolean;
  /** Returns response with indentations and line breaks. */
  prettyPrint?: boolean;
  /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
@@ -2013,11 +2843,9 @@ declare namespace gapi.client {
  upload_protocol?: string;
  /** Legacy upload protocol for media (e.g. "media", "multipart"). */
  uploadType?: string;
- }): Request<ListBatchesResponse>;
- }
- interface OperationsResource {
- /** Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to Code.CANCELLED. */
- cancel(request?: {
+ }): Request<AccessSparkApplicationSqlQueryResponse>;
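SQL-level lookups key off executionId, with details and planDescription controlling how much plan text comes back. A sketch under the same path assumption, with placeholder IDs:

  gapi.client.dataproc.projects.locations.batches.sparkApplications
    .accessSqlQuery({
      name: 'projects/my-project/locations/us-central1/batches/my-batch/sparkApplications/app-1',
      executionId: '3',
      details: true,         // list Spark plan node details
      planDescription: false // skip the physical plan description
    })
    .then((resp) => console.log(resp.result.executionData));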
+ /** Obtain data corresponding to a spark stage attempt for a Spark Application. */
+ accessStageAttempt(request?: {
  /** V1 error format. */
  '$.xgafv'?: string;
  /** OAuth access token. */
@@ -2030,21 +2858,29 @@ declare namespace gapi.client {
  fields?: string;
  /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
  key?: string;
- /** The name of the operation resource to be cancelled. */
+ /** Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" */
  name: string;
  /** OAuth 2.0 token for the current user. */
  oauth_token?: string;
+ /** Required. Parent (Batch) resource reference. */
+ parent?: string;
  /** Returns response with indentations and line breaks. */
  prettyPrint?: boolean;
  /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
  quotaUser?: string;
+ /** Required. Stage Attempt ID */
+ stageAttemptId?: number;
+ /** Required. Stage ID */
+ stageId?: string;
+ /** Optional. The list of summary metrics fields to include. Empty list will default to skip all summary metrics fields. Example, if the response should include TaskQuantileMetrics, the request should have task_quantile_metrics in summary_metrics_mask field */
+ summaryMetricsMask?: string;
  /** Upload protocol for media (e.g. "raw", "multipart"). */
  upload_protocol?: string;
  /** Legacy upload protocol for media (e.g. "media", "multipart"). */
  uploadType?: string;
- }): Request<{}>;
- /** Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. */
- delete(request?: {
+ }): Request<AccessSparkApplicationStageAttemptResponse>;
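Per the summaryMetricsMask doc above, quantile summaries such as TaskQuantileMetrics are skipped unless explicitly requested. A hedged sketch (same path assumption, placeholder IDs):

  gapi.client.dataproc.projects.locations.batches.sparkApplications
    .accessStageAttempt({
      name: 'projects/my-project/locations/us-central1/batches/my-batch/sparkApplications/app-1',
      stageId: '12',
      stageAttemptId: 0,
      summaryMetricsMask: 'task_quantile_metrics', // opt in to quantile summaries
    })
    .then((resp) => console.log(resp.result.stageData));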
+ /** Obtain RDD operation graph for a Spark Application Stage. Limits the number of clusters returned as part of the graph to 10000. */
+ accessStageRddGraph(request?: {
  /** V1 error format. */
  '$.xgafv'?: string;
  /** OAuth access token. */
@@ -2057,37 +2893,55 @@ declare namespace gapi.client {
  fields?: string;
  /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
  key?: string;
- /** The name of the operation resource to be deleted. */
+ /** Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" */
  name: string;
  /** OAuth 2.0 token for the current user. */
  oauth_token?: string;
+ /** Required. Parent (Batch) resource reference. */
+ parent?: string;
  /** Returns response with indentations and line breaks. */
  prettyPrint?: boolean;
  /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
  quotaUser?: string;
+ /** Required. Stage ID */
+ stageId?: string;
  /** Upload protocol for media (e.g. "raw", "multipart"). */
  upload_protocol?: string;
  /** Legacy upload protocol for media (e.g. "media", "multipart"). */
  uploadType?: string;
- }): Request<{}>;
- /** Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service. */
- get(request?: {
+ }): Request<AccessSparkApplicationStageRddOperationGraphResponse>;
+ /** Obtain high level information and list of Spark Applications corresponding to a batch */
+ search(request?: {
  /** V1 error format. */
  '$.xgafv'?: string;
  /** OAuth access token. */
  access_token?: string;
  /** Data format for response. */
  alt?: string;
+ /** Optional. Search only applications in the chosen state. */
+ applicationStatus?: string;
  /** JSONP */
  callback?: string;
  /** Selector specifying which fields to include in a partial response. */
  fields?: string;
  /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
  key?: string;
- /** The name of the operation resource. */
- name: string;
+ /** Optional. Latest end timestamp to list. */
+ maxEndTime?: string;
+ /** Optional. Latest start timestamp to list. */
+ maxTime?: string;
+ /** Optional. Earliest end timestamp to list. */
+ minEndTime?: string;
+ /** Optional. Earliest start timestamp to list. */
+ minTime?: string;
  /** OAuth 2.0 token for the current user. */
  oauth_token?: string;
+ /** Optional. Maximum number of applications to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100. */
+ pageSize?: number;
+ /** Optional. A page token received from a previous SearchSparkApplications call. Provide this token to retrieve the subsequent page. */
+ pageToken?: string;
+ /** Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID" */
+ parent: string;
  /** Returns response with indentations and line breaks. */
  prettyPrint?: boolean;
  /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
@@ -2096,9 +2950,9 @@ declare namespace gapi.client {
  upload_protocol?: string;
  /** Legacy upload protocol for media (e.g. "media", "multipart"). */
  uploadType?: string;
- }): Request<Operation>;
- /** Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED. */
- list(request?: {
+ }): Request<SearchSparkApplicationsResponse>;
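Unlike the access* methods, search is anchored on the batch itself (parent is required and takes the BATCH_ID-level name), with optional start/end-time windows and paging. A sketch under the usual path assumption:

  gapi.client.dataproc.projects.locations.batches.sparkApplications
    .search({
      parent: 'projects/my-project/locations/us-central1/batches/my-batch',
      minTime: '2024-09-01T00:00:00Z', // earliest start timestamp (illustrative)
      maxTime: '2024-09-30T00:00:00Z', // latest start timestamp (illustrative)
      pageSize: 50,
    })
    .then((resp) => console.log(resp.result));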
+ /** Obtain data corresponding to executors for a Spark Application. */
+ searchExecutors(request?: {
  /** V1 error format. */
  '$.xgafv'?: string;
  /** OAuth access token. */
@@ -2107,20 +2961,22 @@ declare namespace gapi.client {
  alt?: string;
  /** JSONP */
  callback?: string;
+ /** Optional. Filter to select whether active/ dead or all executors should be selected. */
+ executorStatus?: string;
  /** Selector specifying which fields to include in a partial response. */
  fields?: string;
- /** The standard list filter. */
- filter?: string;
  /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
  key?: string;
- /** The name of the operation's parent resource. */
+ /** Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" */
  name: string;
  /** OAuth 2.0 token for the current user. */
  oauth_token?: string;
- /** The standard list page size. */
+ /** Optional. Maximum number of executors to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100. */
  pageSize?: number;
- /** The standard list page token. */
+ /** Optional. A page token received from a previous AccessSparkApplicationExecutorsList call. Provide this token to retrieve the subsequent page. */
  pageToken?: string;
+ /** Required. Parent (Batch) resource reference. */
+ parent?: string;
  /** Returns response with indentations and line breaks. */
  prettyPrint?: boolean;
  /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
@@ -2129,7 +2985,1442 @@ declare namespace gapi.client {
  upload_protocol?: string;
  /** Legacy upload protocol for media (e.g. "media", "multipart"). */
  uploadType?: string;
- }): Request<ListOperationsResponse>;
+ }): Request<SearchSparkApplicationExecutorsResponse>;
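All of the search* methods page the same way (pageSize capped at 100, pageToken threading). A hedged drain-all-pages helper for executors; the response field names (sparkApplicationExecutors, nextPageToken) are conventional assumptions, not confirmed by this diff:

  // Collects every page of executor records for one application (sketch).
  async function allExecutors(name: string, parent: string): Promise<unknown[]> {
    const out: unknown[] = [];
    let pageToken: string | undefined;
    do {
      const resp = await gapi.client.dataproc.projects.locations.batches
        .sparkApplications.searchExecutors({ name, parent, pageSize: 100, pageToken });
      const page = resp.result as any; // field names below are assumptions
      out.push(...(page.sparkApplicationExecutors ?? []));
      pageToken = page.nextPageToken;
    } while (pageToken);
    return out;
  }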
+ /** Obtain executor summary with respect to a spark stage attempt. */
+ searchExecutorStageSummary(request?: {
+ /** V1 error format. */
+ '$.xgafv'?: string;
+ /** OAuth access token. */
+ access_token?: string;
+ /** Data format for response. */
+ alt?: string;
+ /** JSONP */
+ callback?: string;
+ /** Selector specifying which fields to include in a partial response. */
+ fields?: string;
+ /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+ key?: string;
+ /** Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" */
+ name: string;
+ /** OAuth 2.0 token for the current user. */
+ oauth_token?: string;
+ /** Optional. Maximum number of executors to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100. */
+ pageSize?: number;
+ /** Optional. A page token received from a previous AccessSparkApplicationExecutorsList call. Provide this token to retrieve the subsequent page. */
+ pageToken?: string;
+ /** Required. Parent (Batch) resource reference. */
+ parent?: string;
+ /** Returns response with indentations and line breaks. */
+ prettyPrint?: boolean;
+ /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+ quotaUser?: string;
+ /** Required. Stage Attempt ID */
+ stageAttemptId?: number;
+ /** Required. Stage ID */
+ stageId?: string;
+ /** Upload protocol for media (e.g. "raw", "multipart"). */
+ upload_protocol?: string;
+ /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+ uploadType?: string;
+ }): Request<SearchSparkApplicationExecutorStageSummaryResponse>;
+ /** Obtain list of spark jobs corresponding to a Spark Application. */
+ searchJobs(request?: {
+ /** V1 error format. */
+ '$.xgafv'?: string;
+ /** OAuth access token. */
+ access_token?: string;
+ /** Data format for response. */
+ alt?: string;
+ /** JSONP */
+ callback?: string;
+ /** Selector specifying which fields to include in a partial response. */
+ fields?: string;
+ /** Optional. List only jobs in the specific state. */
+ jobStatus?: string;
+ /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+ key?: string;
+ /** Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" */
+ name: string;
+ /** OAuth 2.0 token for the current user. */
+ oauth_token?: string;
+ /** Optional. Maximum number of jobs to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100. */
+ pageSize?: number;
+ /** Optional. A page token received from a previous SearchSparkApplicationJobs call. Provide this token to retrieve the subsequent page. */
+ pageToken?: string;
+ /** Required. Parent (Batch) resource reference. */
+ parent?: string;
+ /** Returns response with indentations and line breaks. */
+ prettyPrint?: boolean;
+ /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+ quotaUser?: string;
+ /** Upload protocol for media (e.g. "raw", "multipart"). */
+ upload_protocol?: string;
+ /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+ uploadType?: string;
+ }): Request<SearchSparkApplicationJobsResponse>;
+ /** Obtain data corresponding to SQL Queries for a Spark Application. */
+ searchSqlQueries(request?: {
+ /** V1 error format. */
+ '$.xgafv'?: string;
+ /** OAuth access token. */
+ access_token?: string;
+ /** Data format for response. */
+ alt?: string;
+ /** JSONP */
+ callback?: string;
+ /** Optional. Lists/ hides details of Spark plan nodes. True is set to list and false to hide. */
+ details?: boolean;
+ /** Selector specifying which fields to include in a partial response. */
+ fields?: string;
+ /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+ key?: string;
+ /** Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" */
+ name: string;
+ /** OAuth 2.0 token for the current user. */
+ oauth_token?: string;
+ /** Optional. Maximum number of queries to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100. */
+ pageSize?: number;
+ /** Optional. A page token received from a previous SearchSparkApplicationSqlQueries call. Provide this token to retrieve the subsequent page. */
+ pageToken?: string;
+ /** Required. Parent (Batch) resource reference. */
+ parent?: string;
+ /** Optional. Enables/ disables physical plan description on demand */
+ planDescription?: boolean;
+ /** Returns response with indentations and line breaks. */
+ prettyPrint?: boolean;
+ /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+ quotaUser?: string;
+ /** Upload protocol for media (e.g. "raw", "multipart"). */
+ upload_protocol?: string;
+ /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+ uploadType?: string;
+ }): Request<SearchSparkApplicationSqlQueriesResponse>;
+ /** Obtain data corresponding to a spark stage attempts for a Spark Application. */
+ searchStageAttempts(request?: {
+ /** V1 error format. */
+ '$.xgafv'?: string;
+ /** OAuth access token. */
+ access_token?: string;
+ /** Data format for response. */
+ alt?: string;
+ /** JSONP */
+ callback?: string;
+ /** Selector specifying which fields to include in a partial response. */
+ fields?: string;
+ /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+ key?: string;
+ /** Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" */
+ name: string;
+ /** OAuth 2.0 token for the current user. */
+ oauth_token?: string;
+ /** Optional. Maximum number of stage attempts (paging based on stage_attempt_id) to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100. */
+ pageSize?: number;
+ /** Optional. A page token received from a previous SearchSparkApplicationStageAttempts call. Provide this token to retrieve the subsequent page. */
+ pageToken?: string;
+ /** Required. Parent (Batch) resource reference. */
+ parent?: string;
+ /** Returns response with indentations and line breaks. */
+ prettyPrint?: boolean;
+ /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+ quotaUser?: string;
+ /** Required. Stage ID for which attempts are to be fetched */
+ stageId?: string;
+ /** Optional. The list of summary metrics fields to include. Empty list will default to skip all summary metrics fields. Example, if the response should include TaskQuantileMetrics, the request should have task_quantile_metrics in summary_metrics_mask field */
+ summaryMetricsMask?: string;
+ /** Upload protocol for media (e.g. "raw", "multipart"). */
+ upload_protocol?: string;
+ /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+ uploadType?: string;
+ }): Request<SearchSparkApplicationStageAttemptsResponse>;
+ /** Obtain data corresponding to tasks for a spark stage attempt for a Spark Application. */
+ searchStageAttemptTasks(request?: {
+ /** V1 error format. */
+ '$.xgafv'?: string;
+ /** OAuth access token. */
+ access_token?: string;
+ /** Data format for response. */
+ alt?: string;
+ /** JSONP */
+ callback?: string;
+ /** Selector specifying which fields to include in a partial response. */
+ fields?: string;
+ /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+ key?: string;
+ /** Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" */
+ name: string;
+ /** OAuth 2.0 token for the current user. */
+ oauth_token?: string;
+ /** Optional. Maximum number of tasks to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100. */
+ pageSize?: number;
+ /** Optional. A page token received from a previous ListSparkApplicationStageAttemptTasks call. Provide this token to retrieve the subsequent page. */
+ pageToken?: string;
+ /** Required. Parent (Batch) resource reference. */
+ parent?: string;
+ /** Returns response with indentations and line breaks. */
+ prettyPrint?: boolean;
+ /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+ quotaUser?: string;
+ /** Optional. Sort the tasks by runtime. */
+ sortRuntime?: boolean;
+ /** Optional. Stage Attempt ID */
+ stageAttemptId?: number;
+ /** Optional. Stage ID */
+ stageId?: string;
+ /** Optional. List only tasks in the state. */
+ taskStatus?: string;
+ /** Upload protocol for media (e.g. "raw", "multipart"). */
+ upload_protocol?: string;
+ /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+ uploadType?: string;
+ }): Request<SearchSparkApplicationStageAttemptTasksResponse>;
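Task-level search narrows by stage coordinates and can sort by runtime, which is handy for spotting straggler tasks. A hedged sketch; the taskStatus value is an assumption since the field is typed as a plain string here:

  gapi.client.dataproc.projects.locations.batches.sparkApplications
    .searchStageAttemptTasks({
      name: 'projects/my-project/locations/us-central1/batches/my-batch/sparkApplications/app-1',
      stageId: '12',
      stageAttemptId: 0,
      sortRuntime: true,     // sort tasks by runtime (direction per service default)
      taskStatus: 'FAILED',  // illustrative state string
    })
    .then((resp) => console.log(resp.result));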
+ /** Obtain data corresponding to stages for a Spark Application. */
+ searchStages(request?: {
+ /** V1 error format. */
+ '$.xgafv'?: string;
+ /** OAuth access token. */
+ access_token?: string;
+ /** Data format for response. */
+ alt?: string;
+ /** JSONP */
+ callback?: string;
+ /** Selector specifying which fields to include in a partial response. */
+ fields?: string;
+ /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+ key?: string;
+ /** Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" */
+ name: string;
+ /** OAuth 2.0 token for the current user. */
+ oauth_token?: string;
+ /** Optional. Maximum number of stages (paging based on stage_id) to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100. */
+ pageSize?: number;
+ /** Optional. A page token received from a previous FetchSparkApplicationStagesList call. Provide this token to retrieve the subsequent page. */
+ pageToken?: string;
+ /** Required. Parent (Batch) resource reference. */
+ parent?: string;
+ /** Returns response with indentations and line breaks. */
+ prettyPrint?: boolean;
+ /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+ quotaUser?: string;
+ /** Optional. List only stages in the given state. */
+ stageStatus?: string;
+ /** Optional. The list of summary metrics fields to include. Empty list will default to skip all summary metrics fields. Example, if the response should include TaskQuantileMetrics, the request should have task_quantile_metrics in summary_metrics_mask field */
+ summaryMetricsMask?: string;
+ /** Upload protocol for media (e.g. "raw", "multipart"). */
+ upload_protocol?: string;
+ /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+ uploadType?: string;
+ }): Request<SearchSparkApplicationStagesResponse>;
+ /** Obtain summary of Executor Summary for a Spark Application */
+ summarizeExecutors(request?: {
+ /** V1 error format. */
+ '$.xgafv'?: string;
+ /** OAuth access token. */
+ access_token?: string;
+ /** Data format for response. */
+ alt?: string;
+ /** JSONP */
+ callback?: string;
+ /** Selector specifying which fields to include in a partial response. */
+ fields?: string;
+ /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+ key?: string;
+ /** Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" */
+ name: string;
+ /** OAuth 2.0 token for the current user. */
+ oauth_token?: string;
+ /** Required. Parent (Batch) resource reference. */
+ parent?: string;
+ /** Returns response with indentations and line breaks. */
+ prettyPrint?: boolean;
+ /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+ quotaUser?: string;
+ /** Upload protocol for media (e.g. "raw", "multipart"). */
+ upload_protocol?: string;
+ /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+ uploadType?: string;
+ }): Request<SummarizeSparkApplicationExecutorsResponse>;
+ /** Obtain summary of Jobs for a Spark Application */
+ summarizeJobs(request?: {
+ /** V1 error format. */
+ '$.xgafv'?: string;
+ /** OAuth access token. */
+ access_token?: string;
+ /** Data format for response. */
+ alt?: string;
+ /** JSONP */
+ callback?: string;
+ /** Selector specifying which fields to include in a partial response. */
+ fields?: string;
+ /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+ key?: string;
+ /** Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" */
+ name: string;
+ /** OAuth 2.0 token for the current user. */
+ oauth_token?: string;
+ /** Required. Parent (Batch) resource reference. */
+ parent?: string;
+ /** Returns response with indentations and line breaks. */
+ prettyPrint?: boolean;
+ /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+ quotaUser?: string;
+ /** Upload protocol for media (e.g. "raw", "multipart"). */
+ upload_protocol?: string;
+ /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+ uploadType?: string;
+ }): Request<SummarizeSparkApplicationJobsResponse>;
+ /** Obtain summary of Tasks for a Spark Application Stage Attempt */
+ summarizeStageAttemptTasks(request?: {
+ /** V1 error format. */
+ '$.xgafv'?: string;
+ /** OAuth access token. */
+ access_token?: string;
+ /** Data format for response. */
+ alt?: string;
+ /** JSONP */
+ callback?: string;
+ /** Selector specifying which fields to include in a partial response. */
+ fields?: string;
+ /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+ key?: string;
+ /** Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" */
+ name: string;
+ /** OAuth 2.0 token for the current user. */
+ oauth_token?: string;
+ /** Required. Parent (Batch) resource reference. */
+ parent?: string;
+ /** Returns response with indentations and line breaks. */
+ prettyPrint?: boolean;
+ /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+ quotaUser?: string;
+ /** Required. Stage Attempt ID */
+ stageAttemptId?: number;
+ /** Required. Stage ID */
+ stageId?: string;
+ /** Upload protocol for media (e.g. "raw", "multipart"). */
+ upload_protocol?: string;
+ /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+ uploadType?: string;
+ }): Request<SummarizeSparkApplicationStageAttemptTasksResponse>;
+ /** Obtain summary of Stages for a Spark Application */
+ summarizeStages(request?: {
+ /** V1 error format. */
+ '$.xgafv'?: string;
+ /** OAuth access token. */
+ access_token?: string;
+ /** Data format for response. */
+ alt?: string;
+ /** JSONP */
+ callback?: string;
+ /** Selector specifying which fields to include in a partial response. */
+ fields?: string;
+ /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+ key?: string;
+ /** Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" */
+ name: string;
+ /** OAuth 2.0 token for the current user. */
+ oauth_token?: string;
+ /** Required. Parent (Batch) resource reference. */
+ parent?: string;
+ /** Returns response with indentations and line breaks. */
+ prettyPrint?: boolean;
+ /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+ quotaUser?: string;
+ /** Upload protocol for media (e.g. "raw", "multipart"). */
+ upload_protocol?: string;
+ /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+ uploadType?: string;
+ }): Request<SummarizeSparkApplicationStagesResponse>;
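The summarize* methods take only the application name (plus the parent batch reference) and return aggregate views rather than paged lists. For example, under the same path assumption:

  gapi.client.dataproc.projects.locations.batches.sparkApplications
    .summarizeStages({
      name: 'projects/my-project/locations/us-central1/batches/my-batch/sparkApplications/app-1',
    })
    .then((resp) => console.log(resp.result));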
+ /** Write wrapper objects from dataplane to spanner */
+ write(request: {
+ /** V1 error format. */
+ '$.xgafv'?: string;
+ /** OAuth access token. */
+ access_token?: string;
+ /** Data format for response. */
+ alt?: string;
+ /** JSONP */
+ callback?: string;
+ /** Selector specifying which fields to include in a partial response. */
+ fields?: string;
+ /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+ key?: string;
+ /** Required. The fully qualified name of the spark application to write data about in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" */
+ name: string;
+ /** OAuth 2.0 token for the current user. */
+ oauth_token?: string;
+ /** Returns response with indentations and line breaks. */
+ prettyPrint?: boolean;
+ /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+ quotaUser?: string;
+ /** Upload protocol for media (e.g. "raw", "multipart"). */
+ upload_protocol?: string;
+ /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+ uploadType?: string;
+ /** Request body */
+ resource: WriteSparkApplicationContextRequest;
+ }): Request<{}>;
+ write(
+ request: {
+ /** V1 error format. */
+ '$.xgafv'?: string;
+ /** OAuth access token. */
+ access_token?: string;
+ /** Data format for response. */
+ alt?: string;
+ /** JSONP */
+ callback?: string;
+ /** Selector specifying which fields to include in a partial response. */
+ fields?: string;
+ /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+ key?: string;
+ /** Required. The fully qualified name of the spark application to write data about in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" */
+ name: string;
+ /** OAuth 2.0 token for the current user. */
+ oauth_token?: string;
+ /** Returns response with indentations and line breaks. */
+ prettyPrint?: boolean;
+ /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+ quotaUser?: string;
+ /** Upload protocol for media (e.g. "raw", "multipart"). */
+ upload_protocol?: string;
+ /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+ uploadType?: string;
+ },
+ body: WriteSparkApplicationContextRequest
+ ): Request<{}>;
+ }
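As with the other body-carrying methods in these typings, write is overloaded: either a single request object carrying the body under resource, or a params object plus the body as a second argument. A hedged sketch of the second form (placeholder IDs, same path assumption; in practice this endpoint is fed by the Dataproc data plane rather than end users):

  const body: gapi.client.dataproc.WriteSparkApplicationContextRequest = {
    parent: 'projects/my-project/locations/us-central1/batches/my-batch',
    sparkWrapperObjects: [],
  };
  gapi.client.dataproc.projects.locations.batches.sparkApplications.write(
    { name: 'projects/my-project/locations/us-central1/batches/my-batch/sparkApplications/app-1' },
    body
  );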
3392
+ interface BatchesResource {
3393
+ /** Analyze a Batch for possible recommendations and insights. */
3394
+ analyze(request: {
3395
+ /** V1 error format. */
3396
+ '$.xgafv'?: string;
3397
+ /** OAuth access token. */
3398
+ access_token?: string;
3399
+ /** Data format for response. */
3400
+ alt?: string;
3401
+ /** JSONP */
3402
+ callback?: string;
3403
+ /** Selector specifying which fields to include in a partial response. */
3404
+ fields?: string;
3405
+ /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
3406
+ key?: string;
3407
+ /** Required. The fully qualified name of the batch to analyze in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID" */
3408
+ name: string;
3409
+ /** OAuth 2.0 token for the current user. */
3410
+ oauth_token?: string;
3411
+ /** Returns response with indentations and line breaks. */
3412
+ prettyPrint?: boolean;
3413
+ /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
3414
+ quotaUser?: string;
3415
+ /** Upload protocol for media (e.g. "raw", "multipart"). */
3416
+ upload_protocol?: string;
3417
+ /** Legacy upload protocol for media (e.g. "media", "multipart"). */
3418
+ uploadType?: string;
3419
+ /** Request body */
3420
+ resource: AnalyzeBatchRequest;
3421
+ }): Request<Operation>;
3422
+ analyze(
3423
+ request: {
3424
+ /** V1 error format. */
3425
+ '$.xgafv'?: string;
3426
+ /** OAuth access token. */
3427
+ access_token?: string;
3428
+ /** Data format for response. */
3429
+ alt?: string;
3430
+ /** JSONP */
3431
+ callback?: string;
3432
+ /** Selector specifying which fields to include in a partial response. */
3433
+ fields?: string;
3434
+ /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
3435
+ key?: string;
3436
+ /** Required. The fully qualified name of the batch to analyze in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID" */
3437
+ name: string;
3438
+ /** OAuth 2.0 token for the current user. */
3439
+ oauth_token?: string;
3440
+ /** Returns response with indentations and line breaks. */
3441
+ prettyPrint?: boolean;
3442
+ /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
3443
+ quotaUser?: string;
3444
+ /** Upload protocol for media (e.g. "raw", "multipart"). */
3445
+ upload_protocol?: string;
3446
+ /** Legacy upload protocol for media (e.g. "media", "multipart"). */
3447
+ uploadType?: string;
3448
+ },
3449
+ body: AnalyzeBatchRequest
3450
+ ): Request<Operation>;
3451
+ /** Creates a batch workload that executes asynchronously. */
3452
+ create(request: {
3453
+ /** V1 error format. */
3454
+ '$.xgafv'?: string;
3455
+ /** OAuth access token. */
3456
+ access_token?: string;
3457
+ /** Data format for response. */
3458
+ alt?: string;
3459
+ /** Optional. The ID to use for the batch, which will become the final component of the batch's resource name.This value must be 4-63 characters. Valid characters are /[a-z][0-9]-/. */
3460
+ batchId?: string;
3461
+ /** JSONP */
3462
+ callback?: string;
3463
+ /** Selector specifying which fields to include in a partial response. */
3464
+ fields?: string;
3465
+ /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
3466
+ key?: string;
3467
+ /** OAuth 2.0 token for the current user. */
3468
+ oauth_token?: string;
3469
+ /** Required. The parent resource where this batch will be created. */
3470
+ parent: string;
3471
+ /** Returns response with indentations and line breaks. */
3472
+ prettyPrint?: boolean;
3473
+ /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
3474
+ quotaUser?: string;
3475
+ /** Optional. A unique ID used to identify the request. If the service receives two CreateBatchRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateBatchRequest)s with the same request_id, the second request is ignored and the Operation that corresponds to the first Batch created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The value must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters. */
3476
+ requestId?: string;
3477
+ /** Upload protocol for media (e.g. "raw", "multipart"). */
3478
+ upload_protocol?: string;
3479
+ /** Legacy upload protocol for media (e.g. "media", "multipart"). */
3480
+ uploadType?: string;
3481
+ /** Request body */
3482
+ resource: Batch;
3483
+ }): Request<Operation>;
3484
+ create(
3485
+ request: {
3486
+ /** V1 error format. */
3487
+ '$.xgafv'?: string;
3488
+ /** OAuth access token. */
3489
+ access_token?: string;
3490
+ /** Data format for response. */
3491
+ alt?: string;
3492
+ /** Optional. The ID to use for the batch, which will become the final component of the batch's resource name.This value must be 4-63 characters. Valid characters are /[a-z][0-9]-/. */
3493
+ batchId?: string;
3494
+ /** JSONP */
3495
+ callback?: string;
3496
+ /** Selector specifying which fields to include in a partial response. */
3497
+ fields?: string;
3498
+ /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
3499
+ key?: string;
3500
+ /** OAuth 2.0 token for the current user. */
3501
+ oauth_token?: string;
3502
+ /** Required. The parent resource where this batch will be created. */
3503
+ parent: string;
3504
+ /** Returns response with indentations and line breaks. */
3505
+ prettyPrint?: boolean;
3506
+ /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
3507
+ quotaUser?: string;
3508
+ /** Optional. A unique ID used to identify the request. If the service receives two CreateBatchRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateBatchRequest)s with the same request_id, the second request is ignored and the Operation that corresponds to the first Batch created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The value must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters. */
3509
+ requestId?: string;
3510
+ /** Upload protocol for media (e.g. "raw", "multipart"). */
3511
+ upload_protocol?: string;
3512
+ /** Legacy upload protocol for media (e.g. "media", "multipart"). */
3513
+ uploadType?: string;
3514
+ },
3515
+ body: Batch
3516
+ ): Request<Operation>;
3517
+ /** Deletes the batch workload resource. If the batch is not in a CANCELLED, SUCCEEDED or FAILED State, the delete operation fails and the response returns FAILED_PRECONDITION. */
3518
+ delete(request?: {
3519
+ /** V1 error format. */
3520
+ '$.xgafv'?: string;
3521
+ /** OAuth access token. */
3522
+ access_token?: string;
3523
+ /** Data format for response. */
3524
+ alt?: string;
3525
+ /** JSONP */
3526
+ callback?: string;
3527
+ /** Selector specifying which fields to include in a partial response. */
3528
+ fields?: string;
3529
+ /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
3530
+ key?: string;
3531
+ /** Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID" */
3532
+ name: string;
3533
+ /** OAuth 2.0 token for the current user. */
3534
+ oauth_token?: string;
3535
+ /** Returns response with indentations and line breaks. */
3536
+ prettyPrint?: boolean;
3537
+ /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
3538
+ quotaUser?: string;
3539
+ /** Upload protocol for media (e.g. "raw", "multipart"). */
3540
+ upload_protocol?: string;
3541
+ /** Legacy upload protocol for media (e.g. "media", "multipart"). */
3542
+ uploadType?: string;
3543
+ }): Request<{}>;
3544
+ /** Gets the batch workload resource representation. */
3545
+ get(request?: {
3546
+ /** V1 error format. */
3547
+ '$.xgafv'?: string;
3548
+ /** OAuth access token. */
3549
+ access_token?: string;
3550
+ /** Data format for response. */
3551
+ alt?: string;
3552
+ /** JSONP */
3553
+ callback?: string;
3554
+ /** Selector specifying which fields to include in a partial response. */
3555
+ fields?: string;
3556
+ /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
3557
+ key?: string;
3558
+ /** Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID" */
3559
+ name: string;
3560
+ /** OAuth 2.0 token for the current user. */
3561
+ oauth_token?: string;
3562
+ /** Returns response with indentations and line breaks. */
3563
+ prettyPrint?: boolean;
3564
+ /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
3565
+ quotaUser?: string;
3566
+ /** Upload protocol for media (e.g. "raw", "multipart"). */
3567
+ upload_protocol?: string;
3568
+ /** Legacy upload protocol for media (e.g. "media", "multipart"). */
3569
+ uploadType?: string;
3570
+ }): Request<Batch>;
+ /** Lists batch workloads. */
+ list(request?: {
+ /** V1 error format. */
+ '$.xgafv'?: string;
+ /** OAuth access token. */
+ access_token?: string;
+ /** Data format for response. */
+ alt?: string;
+ /** JSONP */
+ callback?: string;
+ /** Selector specifying which fields to include in a partial response. */
+ fields?: string;
+ /** Optional. A filter for the batches to return in the response. A filter is a logical expression constraining the values of various fields in each batch resource. Filters are case sensitive, and may contain multiple clauses combined with logical operators (AND/OR). Supported fields are batch_id, batch_uuid, state, create_time, and labels. For example, state = RUNNING and create_time < "2023-01-01T00:00:00Z" filters for batches in state RUNNING that were created before 2023-01-01. state = RUNNING and labels.environment=production filters for batches in a RUNNING state that have a production environment label. See https://google.aip.dev/assets/misc/ebnf-filtering.txt for a detailed description of the filter syntax and a list of supported comparisons. */
+ filter?: string;
+ /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+ key?: string;
+ /** OAuth 2.0 token for the current user. */
+ oauth_token?: string;
+ /** Optional. Field(s) on which to sort the list of batches. Currently the only supported sort orders are unspecified (empty) and create_time desc to sort by most recently created batches first. See https://google.aip.dev/132#ordering for more details. */
+ orderBy?: string;
+ /** Optional. The maximum number of batches to return in each response. The service may return fewer than this value. The default page size is 20; the maximum page size is 1000. */
+ pageSize?: number;
+ /** Optional. A page token received from a previous ListBatches call. Provide this token to retrieve the subsequent page. */
+ pageToken?: string;
+ /** Required. The parent, which owns this collection of batches. */
+ parent: string;
+ /** Returns response with indentations and line breaks. */
+ prettyPrint?: boolean;
+ /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+ quotaUser?: string;
+ /** Upload protocol for media (e.g. "raw", "multipart"). */
+ upload_protocol?: string;
+ /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+ uploadType?: string;
+ }): Request<ListBatchesResponse>;
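// Editor's note: a minimal paging sketch for list(), assuming the same loaded
// and authorized gapi.client as above. The parent path and filter value are
// placeholders; ListBatchesResponse exposes `batches` and `nextPageToken`.
async function listRunningBatches(parent: string) {
  const batches: gapi.client.dataproc.Batch[] = [];
  let pageToken: string | undefined;
  do {
    const {result} = await gapi.client.dataproc.projects.locations.batches.list({
      parent, // e.g. 'projects/my-project/locations/us-central1'
      filter: 'state = RUNNING',
      pageSize: 100,
      pageToken,
    });
    batches.push(...(result.batches ?? []));
    pageToken = result.nextPageToken;
  } while (pageToken);
  return batches;
}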
+ sparkApplications: SparkApplicationsResource;
+ }
+ interface OperationsResource {
+ /** Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to Code.CANCELLED. */
+ cancel(request?: {
+ /** V1 error format. */
+ '$.xgafv'?: string;
+ /** OAuth access token. */
+ access_token?: string;
+ /** Data format for response. */
+ alt?: string;
+ /** JSONP */
+ callback?: string;
+ /** Selector specifying which fields to include in a partial response. */
+ fields?: string;
+ /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+ key?: string;
+ /** The name of the operation resource to be cancelled. */
+ name: string;
+ /** OAuth 2.0 token for the current user. */
+ oauth_token?: string;
+ /** Returns response with indentations and line breaks. */
+ prettyPrint?: boolean;
+ /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+ quotaUser?: string;
+ /** Upload protocol for media (e.g. "raw", "multipart"). */
+ upload_protocol?: string;
+ /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+ uploadType?: string;
+ }): Request<{}>;
+ /** Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. */
+ delete(request?: {
+ /** V1 error format. */
+ '$.xgafv'?: string;
+ /** OAuth access token. */
+ access_token?: string;
+ /** Data format for response. */
+ alt?: string;
+ /** JSONP */
+ callback?: string;
+ /** Selector specifying which fields to include in a partial response. */
+ fields?: string;
+ /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+ key?: string;
+ /** The name of the operation resource to be deleted. */
+ name: string;
+ /** OAuth 2.0 token for the current user. */
+ oauth_token?: string;
+ /** Returns response with indentations and line breaks. */
+ prettyPrint?: boolean;
+ /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+ quotaUser?: string;
+ /** Upload protocol for media (e.g. "raw", "multipart"). */
+ upload_protocol?: string;
+ /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+ uploadType?: string;
+ }): Request<{}>;
+ /** Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service. */
+ get(request?: {
+ /** V1 error format. */
+ '$.xgafv'?: string;
+ /** OAuth access token. */
+ access_token?: string;
+ /** Data format for response. */
+ alt?: string;
+ /** JSONP */
+ callback?: string;
+ /** Selector specifying which fields to include in a partial response. */
+ fields?: string;
+ /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+ key?: string;
+ /** The name of the operation resource. */
+ name: string;
+ /** OAuth 2.0 token for the current user. */
+ oauth_token?: string;
+ /** Returns response with indentations and line breaks. */
+ prettyPrint?: boolean;
+ /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+ quotaUser?: string;
+ /** Upload protocol for media (e.g. "raw", "multipart"). */
+ upload_protocol?: string;
+ /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+ uploadType?: string;
+ }): Request<Operation>;
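// Editor's note: a polling sketch for the long-running-operation pattern the
// method comments above describe. It assumes this OperationsResource is
// exposed as gapi.client.dataproc.projects.locations.operations, matching its
// position alongside batches here; the 5-second interval is an arbitrary
// placeholder choice.
async function waitForOperation(name: string) {
  for (;;) {
    const {result} = await gapi.client.dataproc.projects.locations.operations.get({name});
    if (result.done) return result; // result.error or result.response is now set
    await new Promise(resolve => setTimeout(resolve, 5000));
  }
}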
+ /** Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED. */
+ list(request?: {
+ /** V1 error format. */
+ '$.xgafv'?: string;
+ /** OAuth access token. */
+ access_token?: string;
+ /** Data format for response. */
+ alt?: string;
+ /** JSONP */
+ callback?: string;
+ /** Selector specifying which fields to include in a partial response. */
+ fields?: string;
+ /** The standard list filter. */
+ filter?: string;
+ /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+ key?: string;
+ /** The name of the operation's parent resource. */
+ name: string;
+ /** OAuth 2.0 token for the current user. */
+ oauth_token?: string;
+ /** The standard list page size. */
+ pageSize?: number;
+ /** The standard list page token. */
+ pageToken?: string;
+ /** Returns response with indentations and line breaks. */
+ prettyPrint?: boolean;
+ /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+ quotaUser?: string;
+ /** Upload protocol for media (e.g. "raw", "multipart"). */
+ upload_protocol?: string;
+ /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+ uploadType?: string;
+ }): Request<ListOperationsResponse>;
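// Editor's note: a sketch of the matching list() call, under the same
// resource-path assumption as the polling sketch above. The parent name is a
// placeholder; filter, pageSize, and pageToken behave as standard list
// parameters.
gapi.client.dataproc.projects.locations.operations
  .list({name: 'projects/my-project/locations/us-central1/operations', pageSize: 50})
  .then(response => console.log(response.result.operations ?? []));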
+ }
+ interface SparkApplicationsResource {
+ /** Obtain high level information corresponding to a single Spark Application. */
+ access(request?: {
+ /** V1 error format. */
+ '$.xgafv'?: string;
+ /** OAuth access token. */
+ access_token?: string;
+ /** Data format for response. */
+ alt?: string;
+ /** JSONP */
+ callback?: string;
+ /** Selector specifying which fields to include in a partial response. */
+ fields?: string;
+ /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+ key?: string;
+ /** Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" */
+ name: string;
+ /** OAuth 2.0 token for the current user. */
+ oauth_token?: string;
+ /** Required. Parent (Session) resource reference. */
+ parent?: string;
+ /** Returns response with indentations and line breaks. */
+ prettyPrint?: boolean;
+ /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+ quotaUser?: string;
+ /** Upload protocol for media (e.g. "raw", "multipart"). */
+ upload_protocol?: string;
+ /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+ uploadType?: string;
+ }): Request<AccessSessionSparkApplicationResponse>;
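// Editor's note: an illustrative sketch of fetching high-level information for
// one Spark application that ran in an interactive session. All IDs are
// placeholders; the resource hangs off SessionsResource via the
// sparkApplications member added later in this diff.
const appName =
  'projects/my-project/locations/us-central1/sessions/my-session/sparkApplications/app-1';
gapi.client.dataproc.projects.locations.sessions.sparkApplications
  .access({name: appName})
  .then(response => console.log(response.result.application));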
+ /** Obtain environment details for a Spark Application. */
+ accessEnvironmentInfo(request?: {
+ /** V1 error format. */
+ '$.xgafv'?: string;
+ /** OAuth access token. */
+ access_token?: string;
+ /** Data format for response. */
+ alt?: string;
+ /** JSONP */
+ callback?: string;
+ /** Selector specifying which fields to include in a partial response. */
+ fields?: string;
+ /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+ key?: string;
+ /** Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" */
+ name: string;
+ /** OAuth 2.0 token for the current user. */
+ oauth_token?: string;
+ /** Required. Parent (Session) resource reference. */
+ parent?: string;
+ /** Returns response with indentations and line breaks. */
+ prettyPrint?: boolean;
+ /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+ quotaUser?: string;
+ /** Upload protocol for media (e.g. "raw", "multipart"). */
+ upload_protocol?: string;
+ /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+ uploadType?: string;
+ }): Request<AccessSessionSparkApplicationEnvironmentInfoResponse>;
+ /** Obtain data corresponding to a spark job for a Spark Application. */
+ accessJob(request?: {
+ /** V1 error format. */
+ '$.xgafv'?: string;
+ /** OAuth access token. */
+ access_token?: string;
+ /** Data format for response. */
+ alt?: string;
+ /** JSONP */
+ callback?: string;
+ /** Selector specifying which fields to include in a partial response. */
+ fields?: string;
+ /** Required. Job ID to fetch data for. */
+ jobId?: string;
+ /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+ key?: string;
+ /** Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" */
+ name: string;
+ /** OAuth 2.0 token for the current user. */
+ oauth_token?: string;
+ /** Required. Parent (Session) resource reference. */
+ parent?: string;
+ /** Returns response with indentations and line breaks. */
+ prettyPrint?: boolean;
+ /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+ quotaUser?: string;
+ /** Upload protocol for media (e.g. "raw", "multipart"). */
+ upload_protocol?: string;
+ /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+ uploadType?: string;
+ }): Request<AccessSessionSparkApplicationJobResponse>;
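// Editor's note: a sketch of pulling one Spark job's data, reusing appName
// from the access() sketch above. The job ID is a placeholder; note that
// jobId is documented as required even though the typing marks it optional.
gapi.client.dataproc.projects.locations.sessions.sparkApplications
  .accessJob({name: appName, jobId: '42'})
  .then(response => console.log(response.result.jobData));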
+ /** Obtain Spark Plan Graph for a Spark Application SQL execution. Limits the number of clusters returned as part of the graph to 10000. */
+ accessSqlPlan(request?: {
+ /** V1 error format. */
+ '$.xgafv'?: string;
+ /** OAuth access token. */
+ access_token?: string;
+ /** Data format for response. */
+ alt?: string;
+ /** JSONP */
+ callback?: string;
+ /** Required. Execution ID */
+ executionId?: string;
+ /** Selector specifying which fields to include in a partial response. */
+ fields?: string;
+ /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+ key?: string;
+ /** Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" */
+ name: string;
+ /** OAuth 2.0 token for the current user. */
+ oauth_token?: string;
+ /** Required. Parent (Session) resource reference. */
+ parent?: string;
+ /** Returns response with indentations and line breaks. */
+ prettyPrint?: boolean;
+ /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+ quotaUser?: string;
+ /** Upload protocol for media (e.g. "raw", "multipart"). */
+ upload_protocol?: string;
+ /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+ uploadType?: string;
+ }): Request<AccessSessionSparkApplicationSqlSparkPlanGraphResponse>;
+ /** Obtain data corresponding to a particular SQL Query for a Spark Application. */
+ accessSqlQuery(request?: {
+ /** V1 error format. */
+ '$.xgafv'?: string;
+ /** OAuth access token. */
+ access_token?: string;
+ /** Data format for response. */
+ alt?: string;
+ /** JSONP */
+ callback?: string;
+ /** Optional. Lists or hides details of Spark plan nodes. Set to true to list and false to hide. */
+ details?: boolean;
+ /** Required. Execution ID */
+ executionId?: string;
+ /** Selector specifying which fields to include in a partial response. */
+ fields?: string;
+ /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+ key?: string;
+ /** Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" */
+ name: string;
+ /** OAuth 2.0 token for the current user. */
+ oauth_token?: string;
+ /** Required. Parent (Session) resource reference. */
+ parent?: string;
+ /** Optional. Enables or disables the physical plan description on demand. */
+ planDescription?: boolean;
+ /** Returns response with indentations and line breaks. */
+ prettyPrint?: boolean;
+ /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+ quotaUser?: string;
+ /** Upload protocol for media (e.g. "raw", "multipart"). */
+ upload_protocol?: string;
+ /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+ uploadType?: string;
+ }): Request<AccessSessionSparkApplicationSqlQueryResponse>;
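// Editor's note: a sketch of fetching one SQL execution with plan-node details
// and the physical plan description switched on. appName is reused from the
// access() sketch above; the execution ID is a placeholder.
gapi.client.dataproc.projects.locations.sessions.sparkApplications
  .accessSqlQuery({name: appName, executionId: '7', details: true, planDescription: true})
  .then(response => console.log(response.result.executionData));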
+ /** Obtain data corresponding to a spark stage attempt for a Spark Application. */
+ accessStageAttempt(request?: {
+ /** V1 error format. */
+ '$.xgafv'?: string;
+ /** OAuth access token. */
+ access_token?: string;
+ /** Data format for response. */
+ alt?: string;
+ /** JSONP */
+ callback?: string;
+ /** Selector specifying which fields to include in a partial response. */
+ fields?: string;
+ /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+ key?: string;
+ /** Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" */
+ name: string;
+ /** OAuth 2.0 token for the current user. */
+ oauth_token?: string;
+ /** Required. Parent (Session) resource reference. */
+ parent?: string;
+ /** Returns response with indentations and line breaks. */
+ prettyPrint?: boolean;
+ /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+ quotaUser?: string;
+ /** Required. Stage Attempt ID */
+ stageAttemptId?: number;
+ /** Required. Stage ID */
+ stageId?: string;
+ /** Optional. The list of summary metrics fields to include. An empty list defaults to skipping all summary metrics fields. For example, if the response should include TaskQuantileMetrics, the request should have task_quantile_metrics in the summary_metrics_mask field. */
+ summaryMetricsMask?: string;
+ /** Upload protocol for media (e.g. "raw", "multipart"). */
+ upload_protocol?: string;
+ /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+ uploadType?: string;
+ }): Request<AccessSessionSparkApplicationStageAttemptResponse>;
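// Editor's note: a sketch of fetching a single stage attempt while opting in
// to task quantile metrics through summaryMetricsMask, whose value here is
// taken from the parameter doc above. appName and the IDs are placeholders.
gapi.client.dataproc.projects.locations.sessions.sparkApplications
  .accessStageAttempt({
    name: appName,
    stageId: '3',
    stageAttemptId: 0,
    summaryMetricsMask: 'task_quantile_metrics',
  })
  .then(response => console.log(response.result.stageData));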
+ /** Obtain RDD operation graph for a Spark Application Stage. Limits the number of clusters returned as part of the graph to 10000. */
+ accessStageRddGraph(request?: {
+ /** V1 error format. */
+ '$.xgafv'?: string;
+ /** OAuth access token. */
+ access_token?: string;
+ /** Data format for response. */
+ alt?: string;
+ /** JSONP */
+ callback?: string;
+ /** Selector specifying which fields to include in a partial response. */
+ fields?: string;
+ /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+ key?: string;
+ /** Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" */
+ name: string;
+ /** OAuth 2.0 token for the current user. */
+ oauth_token?: string;
+ /** Required. Parent (Session) resource reference. */
+ parent?: string;
+ /** Returns response with indentations and line breaks. */
+ prettyPrint?: boolean;
+ /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+ quotaUser?: string;
+ /** Required. Stage ID */
+ stageId?: string;
+ /** Upload protocol for media (e.g. "raw", "multipart"). */
+ upload_protocol?: string;
+ /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+ uploadType?: string;
+ }): Request<AccessSessionSparkApplicationStageRddOperationGraphResponse>;
+ /** Obtain high level information and list of Spark Applications corresponding to a session. */
+ search(request?: {
+ /** V1 error format. */
+ '$.xgafv'?: string;
+ /** OAuth access token. */
+ access_token?: string;
+ /** Data format for response. */
+ alt?: string;
+ /** Optional. Search only applications in the chosen state. */
+ applicationStatus?: string;
+ /** JSONP */
+ callback?: string;
+ /** Selector specifying which fields to include in a partial response. */
+ fields?: string;
+ /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+ key?: string;
+ /** Optional. Latest end timestamp to list. */
+ maxEndTime?: string;
+ /** Optional. Latest start timestamp to list. */
+ maxTime?: string;
+ /** Optional. Earliest end timestamp to list. */
+ minEndTime?: string;
+ /** Optional. Earliest start timestamp to list. */
+ minTime?: string;
+ /** OAuth 2.0 token for the current user. */
+ oauth_token?: string;
+ /** Optional. Maximum number of applications to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100. */
+ pageSize?: number;
+ /** Optional. A page token received from a previous SearchSessionSparkApplications call. Provide this token to retrieve the subsequent page. */
+ pageToken?: string;
+ /** Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID" */
+ parent: string;
+ /** Returns response with indentations and line breaks. */
+ prettyPrint?: boolean;
+ /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+ quotaUser?: string;
+ /** Upload protocol for media (e.g. "raw", "multipart"). */
+ upload_protocol?: string;
+ /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+ uploadType?: string;
+ }): Request<SearchSessionSparkApplicationsResponse>;
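// Editor's note: a sketch of searching a session's Spark applications within a
// time window. The parent path and timestamps are placeholders; results page
// via pageToken like the other search methods in this resource.
gapi.client.dataproc.projects.locations.sessions.sparkApplications
  .search({
    parent: 'projects/my-project/locations/us-central1/sessions/my-session',
    minEndTime: '2024-09-01T00:00:00Z',
    maxEndTime: '2024-09-30T23:59:59Z',
    pageSize: 50,
  })
  .then(response => console.log(response.result));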
+ /** Obtain data corresponding to executors for a Spark Application. */
+ searchExecutors(request?: {
+ /** V1 error format. */
+ '$.xgafv'?: string;
+ /** OAuth access token. */
+ access_token?: string;
+ /** Data format for response. */
+ alt?: string;
+ /** JSONP */
+ callback?: string;
+ /** Optional. Filter to select whether active, dead, or all executors should be selected. */
+ executorStatus?: string;
+ /** Selector specifying which fields to include in a partial response. */
+ fields?: string;
+ /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+ key?: string;
+ /** Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" */
+ name: string;
+ /** OAuth 2.0 token for the current user. */
+ oauth_token?: string;
+ /** Optional. Maximum number of executors to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100. */
+ pageSize?: number;
+ /** Optional. A page token received from a previous SearchSessionSparkApplicationExecutors call. Provide this token to retrieve the subsequent page. */
+ pageToken?: string;
+ /** Required. Parent (Session) resource reference. */
+ parent?: string;
+ /** Returns response with indentations and line breaks. */
+ prettyPrint?: boolean;
+ /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+ quotaUser?: string;
+ /** Upload protocol for media (e.g. "raw", "multipart"). */
+ upload_protocol?: string;
+ /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+ uploadType?: string;
+ }): Request<SearchSessionSparkApplicationExecutorsResponse>;
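// Editor's note: a sketch of listing an application's executors, reusing
// appName from the access() sketch above. The executorStatus value is a
// placeholder standing in for whichever status values the API accepts.
gapi.client.dataproc.projects.locations.sessions.sparkApplications
  .searchExecutors({name: appName, executorStatus: 'ACTIVE', pageSize: 100})
  .then(response => console.log(response.result));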
+ /** Obtain executor summary with respect to a spark stage attempt. */
+ searchExecutorStageSummary(request?: {
+ /** V1 error format. */
+ '$.xgafv'?: string;
+ /** OAuth access token. */
+ access_token?: string;
+ /** Data format for response. */
+ alt?: string;
+ /** JSONP */
+ callback?: string;
+ /** Selector specifying which fields to include in a partial response. */
+ fields?: string;
+ /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+ key?: string;
+ /** Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" */
+ name: string;
+ /** OAuth 2.0 token for the current user. */
+ oauth_token?: string;
+ /** Optional. Maximum number of executors to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100. */
+ pageSize?: number;
+ /** Optional. A page token received from a previous SearchSessionSparkApplicationExecutorStageSummary call. Provide this token to retrieve the subsequent page. */
+ pageToken?: string;
+ /** Required. Parent (Session) resource reference. */
+ parent?: string;
+ /** Returns response with indentations and line breaks. */
+ prettyPrint?: boolean;
+ /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+ quotaUser?: string;
+ /** Required. Stage Attempt ID */
+ stageAttemptId?: number;
+ /** Required. Stage ID */
+ stageId?: string;
+ /** Upload protocol for media (e.g. "raw", "multipart"). */
+ upload_protocol?: string;
+ /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+ uploadType?: string;
+ }): Request<SearchSessionSparkApplicationExecutorStageSummaryResponse>;
+ /** Obtain list of spark jobs corresponding to a Spark Application. */
+ searchJobs(request?: {
+ /** V1 error format. */
+ '$.xgafv'?: string;
+ /** OAuth access token. */
+ access_token?: string;
+ /** Data format for response. */
+ alt?: string;
+ /** JSONP */
+ callback?: string;
+ /** Selector specifying which fields to include in a partial response. */
+ fields?: string;
+ /** Optional. List only jobs in the specific state. */
+ jobStatus?: string;
+ /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+ key?: string;
+ /** Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" */
+ name: string;
+ /** OAuth 2.0 token for the current user. */
+ oauth_token?: string;
+ /** Optional. Maximum number of jobs to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100. */
+ pageSize?: number;
+ /** Optional. A page token received from a previous SearchSessionSparkApplicationJobs call. Provide this token to retrieve the subsequent page. */
+ pageToken?: string;
+ /** Required. Parent (Session) resource reference. */
+ parent?: string;
+ /** Returns response with indentations and line breaks. */
+ prettyPrint?: boolean;
+ /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+ quotaUser?: string;
+ /** Upload protocol for media (e.g. "raw", "multipart"). */
+ upload_protocol?: string;
+ /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+ uploadType?: string;
+ }): Request<SearchSessionSparkApplicationJobsResponse>;
+ /** Obtain data corresponding to SQL Queries for a Spark Application. */
+ searchSqlQueries(request?: {
+ /** V1 error format. */
+ '$.xgafv'?: string;
+ /** OAuth access token. */
+ access_token?: string;
+ /** Data format for response. */
+ alt?: string;
+ /** JSONP */
+ callback?: string;
+ /** Optional. Lists or hides details of Spark plan nodes. Set to true to list and false to hide. */
+ details?: boolean;
+ /** Selector specifying which fields to include in a partial response. */
+ fields?: string;
+ /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+ key?: string;
+ /** Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" */
+ name: string;
+ /** OAuth 2.0 token for the current user. */
+ oauth_token?: string;
+ /** Optional. Maximum number of queries to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100. */
+ pageSize?: number;
+ /** Optional. A page token received from a previous SearchSessionSparkApplicationSqlQueries call. Provide this token to retrieve the subsequent page. */
+ pageToken?: string;
+ /** Required. Parent (Session) resource reference. */
+ parent?: string;
+ /** Optional. Enables or disables the physical plan description on demand. */
+ planDescription?: boolean;
+ /** Returns response with indentations and line breaks. */
+ prettyPrint?: boolean;
+ /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+ quotaUser?: string;
+ /** Upload protocol for media (e.g. "raw", "multipart"). */
+ upload_protocol?: string;
+ /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+ uploadType?: string;
+ }): Request<SearchSessionSparkApplicationSqlQueriesResponse>;
+ /** Obtain data corresponding to spark stage attempts for a Spark Application. */
+ searchStageAttempts(request?: {
+ /** V1 error format. */
+ '$.xgafv'?: string;
+ /** OAuth access token. */
+ access_token?: string;
+ /** Data format for response. */
+ alt?: string;
+ /** JSONP */
+ callback?: string;
+ /** Selector specifying which fields to include in a partial response. */
+ fields?: string;
+ /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+ key?: string;
+ /** Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" */
+ name: string;
+ /** OAuth 2.0 token for the current user. */
+ oauth_token?: string;
+ /** Optional. Maximum number of stage attempts (paging based on stage_attempt_id) to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100. */
+ pageSize?: number;
+ /** Optional. A page token received from a previous SearchSessionSparkApplicationStageAttempts call. Provide this token to retrieve the subsequent page. */
+ pageToken?: string;
+ /** Required. Parent (Session) resource reference. */
+ parent?: string;
+ /** Returns response with indentations and line breaks. */
+ prettyPrint?: boolean;
+ /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+ quotaUser?: string;
+ /** Required. Stage ID for which attempts are to be fetched */
+ stageId?: string;
+ /** Optional. The list of summary metrics fields to include. An empty list defaults to skipping all summary metrics fields. For example, if the response should include TaskQuantileMetrics, the request should have task_quantile_metrics in the summary_metrics_mask field. */
+ summaryMetricsMask?: string;
+ /** Upload protocol for media (e.g. "raw", "multipart"). */
+ upload_protocol?: string;
+ /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+ uploadType?: string;
+ }): Request<SearchSessionSparkApplicationStageAttemptsResponse>;
+ /** Obtain data corresponding to tasks for a spark stage attempt for a Spark Application. */
+ searchStageAttemptTasks(request?: {
+ /** V1 error format. */
+ '$.xgafv'?: string;
+ /** OAuth access token. */
+ access_token?: string;
+ /** Data format for response. */
+ alt?: string;
+ /** JSONP */
+ callback?: string;
+ /** Selector specifying which fields to include in a partial response. */
+ fields?: string;
+ /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+ key?: string;
+ /** Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" */
+ name: string;
+ /** OAuth 2.0 token for the current user. */
+ oauth_token?: string;
+ /** Optional. Maximum number of tasks to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100. */
+ pageSize?: number;
+ /** Optional. A page token received from a previous SearchSessionSparkApplicationStageAttemptTasks call. Provide this token to retrieve the subsequent page. */
+ pageToken?: string;
+ /** Required. Parent (Session) resource reference. */
+ parent?: string;
+ /** Returns response with indentations and line breaks. */
+ prettyPrint?: boolean;
+ /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+ quotaUser?: string;
+ /** Optional. Sort the tasks by runtime. */
+ sortRuntime?: boolean;
+ /** Optional. Stage Attempt ID */
+ stageAttemptId?: number;
+ /** Optional. Stage ID */
+ stageId?: string;
+ /** Optional. List only tasks in the given state. */
+ taskStatus?: string;
+ /** Upload protocol for media (e.g. "raw", "multipart"). */
+ upload_protocol?: string;
+ /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+ uploadType?: string;
+ }): Request<SearchSessionSparkApplicationStageAttemptTasksResponse>;
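// Editor's note: a sketch of listing one stage attempt's tasks sorted by
// runtime, e.g. to surface stragglers. appName is reused from the access()
// sketch above and the IDs are placeholders.
gapi.client.dataproc.projects.locations.sessions.sparkApplications
  .searchStageAttemptTasks({
    name: appName,
    stageId: '3',
    stageAttemptId: 0,
    sortRuntime: true,
    pageSize: 100,
  })
  .then(response => console.log(response.result));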
+ /** Obtain data corresponding to stages for a Spark Application. */
+ searchStages(request?: {
+ /** V1 error format. */
+ '$.xgafv'?: string;
+ /** OAuth access token. */
+ access_token?: string;
+ /** Data format for response. */
+ alt?: string;
+ /** JSONP */
+ callback?: string;
+ /** Selector specifying which fields to include in a partial response. */
+ fields?: string;
+ /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+ key?: string;
+ /** Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" */
+ name: string;
+ /** OAuth 2.0 token for the current user. */
+ oauth_token?: string;
+ /** Optional. Maximum number of stages (paging based on stage_id) to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100. */
+ pageSize?: number;
+ /** Optional. A page token received from a previous SearchSessionSparkApplicationStages call. Provide this token to retrieve the subsequent page. */
+ pageToken?: string;
+ /** Required. Parent (Session) resource reference. */
+ parent?: string;
+ /** Returns response with indentations and line breaks. */
+ prettyPrint?: boolean;
+ /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+ quotaUser?: string;
+ /** Optional. List only stages in the given state. */
+ stageStatus?: string;
+ /** Optional. The list of summary metrics fields to include. An empty list defaults to skipping all summary metrics fields. For example, if the response should include TaskQuantileMetrics, the request should have task_quantile_metrics in the summary_metrics_mask field. */
+ summaryMetricsMask?: string;
+ /** Upload protocol for media (e.g. "raw", "multipart"). */
+ upload_protocol?: string;
+ /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+ uploadType?: string;
+ }): Request<SearchSessionSparkApplicationStagesResponse>;
+ /** Obtain a summary of executors for a Spark Application. */
+ summarizeExecutors(request?: {
+ /** V1 error format. */
+ '$.xgafv'?: string;
+ /** OAuth access token. */
+ access_token?: string;
+ /** Data format for response. */
+ alt?: string;
+ /** JSONP */
+ callback?: string;
+ /** Selector specifying which fields to include in a partial response. */
+ fields?: string;
+ /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+ key?: string;
+ /** Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" */
+ name: string;
+ /** OAuth 2.0 token for the current user. */
+ oauth_token?: string;
+ /** Required. Parent (Session) resource reference. */
+ parent?: string;
+ /** Returns response with indentations and line breaks. */
+ prettyPrint?: boolean;
+ /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+ quotaUser?: string;
+ /** Upload protocol for media (e.g. "raw", "multipart"). */
+ upload_protocol?: string;
+ /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+ uploadType?: string;
+ }): Request<SummarizeSessionSparkApplicationExecutorsResponse>;
+ /** Obtain summary of Jobs for a Spark Application. */
+ summarizeJobs(request?: {
+ /** V1 error format. */
+ '$.xgafv'?: string;
+ /** OAuth access token. */
+ access_token?: string;
+ /** Data format for response. */
+ alt?: string;
+ /** JSONP */
+ callback?: string;
+ /** Selector specifying which fields to include in a partial response. */
+ fields?: string;
+ /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+ key?: string;
+ /** Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" */
+ name: string;
+ /** OAuth 2.0 token for the current user. */
+ oauth_token?: string;
+ /** Required. Parent (Session) resource reference. */
+ parent?: string;
+ /** Returns response with indentations and line breaks. */
+ prettyPrint?: boolean;
+ /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+ quotaUser?: string;
+ /** Upload protocol for media (e.g. "raw", "multipart"). */
+ upload_protocol?: string;
+ /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+ uploadType?: string;
+ }): Request<SummarizeSessionSparkApplicationJobsResponse>;
+ /** Obtain summary of Tasks for a Spark Application Stage Attempt. */
+ summarizeStageAttemptTasks(request?: {
+ /** V1 error format. */
+ '$.xgafv'?: string;
+ /** OAuth access token. */
+ access_token?: string;
+ /** Data format for response. */
+ alt?: string;
+ /** JSONP */
+ callback?: string;
+ /** Selector specifying which fields to include in a partial response. */
+ fields?: string;
+ /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+ key?: string;
+ /** Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" */
+ name: string;
+ /** OAuth 2.0 token for the current user. */
+ oauth_token?: string;
+ /** Required. Parent (Session) resource reference. */
+ parent?: string;
+ /** Returns response with indentations and line breaks. */
+ prettyPrint?: boolean;
+ /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+ quotaUser?: string;
+ /** Required. Stage Attempt ID */
+ stageAttemptId?: number;
+ /** Required. Stage ID */
+ stageId?: string;
+ /** Upload protocol for media (e.g. "raw", "multipart"). */
+ upload_protocol?: string;
+ /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+ uploadType?: string;
+ }): Request<SummarizeSessionSparkApplicationStageAttemptTasksResponse>;
+ /** Obtain summary of Stages for a Spark Application. */
+ summarizeStages(request?: {
+ /** V1 error format. */
+ '$.xgafv'?: string;
+ /** OAuth access token. */
+ access_token?: string;
+ /** Data format for response. */
+ alt?: string;
+ /** JSONP */
+ callback?: string;
+ /** Selector specifying which fields to include in a partial response. */
+ fields?: string;
+ /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+ key?: string;
+ /** Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" */
+ name: string;
+ /** OAuth 2.0 token for the current user. */
+ oauth_token?: string;
+ /** Required. Parent (Session) resource reference. */
+ parent?: string;
+ /** Returns response with indentations and line breaks. */
+ prettyPrint?: boolean;
+ /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+ quotaUser?: string;
+ /** Upload protocol for media (e.g. "raw", "multipart"). */
+ upload_protocol?: string;
+ /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+ uploadType?: string;
+ }): Request<SummarizeSessionSparkApplicationStagesResponse>;
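// Editor's note: the four summarize* methods above share one shape, so a
// single sketch for stage summaries stands in for all of them; appName is
// reused from the access() sketch above.
gapi.client.dataproc.projects.locations.sessions.sparkApplications
  .summarizeStages({name: appName})
  .then(response => console.log(response.result));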
+ /** Write wrapper objects from the dataplane to Spanner. */
+ write(request: {
+ /** V1 error format. */
+ '$.xgafv'?: string;
+ /** OAuth access token. */
+ access_token?: string;
+ /** Data format for response. */
+ alt?: string;
+ /** JSONP */
+ callback?: string;
+ /** Selector specifying which fields to include in a partial response. */
+ fields?: string;
+ /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+ key?: string;
+ /** Required. The fully qualified name of the spark application to write data about in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" */
+ name: string;
+ /** OAuth 2.0 token for the current user. */
+ oauth_token?: string;
+ /** Returns response with indentations and line breaks. */
+ prettyPrint?: boolean;
+ /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+ quotaUser?: string;
+ /** Upload protocol for media (e.g. "raw", "multipart"). */
+ upload_protocol?: string;
+ /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+ uploadType?: string;
+ /** Request body */
+ resource: WriteSessionSparkApplicationContextRequest;
+ }): Request<{}>;
+ write(
+ request: {
+ /** V1 error format. */
+ '$.xgafv'?: string;
+ /** OAuth access token. */
+ access_token?: string;
+ /** Data format for response. */
+ alt?: string;
+ /** JSONP */
+ callback?: string;
+ /** Selector specifying which fields to include in a partial response. */
+ fields?: string;
+ /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+ key?: string;
+ /** Required. The fully qualified name of the spark application to write data about in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" */
+ name: string;
+ /** OAuth 2.0 token for the current user. */
+ oauth_token?: string;
+ /** Returns response with indentations and line breaks. */
+ prettyPrint?: boolean;
+ /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+ quotaUser?: string;
+ /** Upload protocol for media (e.g. "raw", "multipart"). */
+ upload_protocol?: string;
+ /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+ uploadType?: string;
+ },
+ body: WriteSessionSparkApplicationContextRequest
+ ): Request<{}>;
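// Editor's note: write() is declared twice above, once with the body inlined
// as `resource` and once with the body as a separate second argument; the two
// calls below are equivalent sketches. The empty request body is a
// placeholder, and per its comment this method targets the Dataproc dataplane
// rather than end-user code.
gapi.client.dataproc.projects.locations.sessions.sparkApplications.write({
  name: appName,
  resource: {},
});
gapi.client.dataproc.projects.locations.sessions.sparkApplications.write(
  {name: appName},
  {}
);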
  }
  interface SessionsResource {
  /** Create an interactive session asynchronously. */
@@ -2345,6 +4636,7 @@ declare namespace gapi.client {
  },
  body: TerminateSessionRequest
  ): Request<Operation>;
+ sparkApplications: SparkApplicationsResource;
  }
  interface SessionTemplatesResource {
  /** Create a session template synchronously. */