@maxim_mazurok/gapi.client.dataproc-v1 0.0.20240821 → 0.0.20240919
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/index.d.ts +2410 -126
- package/package.json +1 -1
package/index.d.ts
CHANGED
|
@@ -9,7 +9,7 @@
|
|
|
9
9
|
// This file was generated by https://github.com/Maxim-Mazurok/google-api-typings-generator. Please do not edit it manually.
|
|
10
10
|
// In case of any problems please post issue to https://github.com/Maxim-Mazurok/google-api-typings-generator
|
|
11
11
|
// Generated from: https://dataproc.googleapis.com/$discovery/rest?version=v1
|
|
12
|
-
// Revision:
|
|
12
|
+
// Revision: 20240919
|
|
13
13
|
|
|
14
14
|
/// <reference types="gapi.client" />
|
|
15
15
|
|
|
@@ -30,6 +30,68 @@ declare namespace gapi.client {
|
|
|
30
30
|
/** Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4. */
|
|
31
31
|
acceleratorTypeUri?: string;
|
|
32
32
|
}
|
|
33
|
+
interface AccessSessionSparkApplicationEnvironmentInfoResponse {
|
|
34
|
+
/** Details about the Environment that the application is running in. */
|
|
35
|
+
applicationEnvironmentInfo?: ApplicationEnvironmentInfo;
|
|
36
|
+
}
|
|
37
|
+
interface AccessSessionSparkApplicationJobResponse {
|
|
38
|
+
/** Output only. Data corresponding to a spark job. */
|
|
39
|
+
jobData?: JobData;
|
|
40
|
+
}
|
|
41
|
+
interface AccessSessionSparkApplicationResponse {
|
|
42
|
+
/** Output only. High level information corresponding to an application. */
|
|
43
|
+
application?: ApplicationInfo;
|
|
44
|
+
}
|
|
45
|
+
interface AccessSessionSparkApplicationSqlQueryResponse {
|
|
46
|
+
/** SQL Execution Data */
|
|
47
|
+
executionData?: SqlExecutionUiData;
|
|
48
|
+
}
|
|
49
|
+
interface AccessSessionSparkApplicationSqlSparkPlanGraphResponse {
|
|
50
|
+
/** SparkPlanGraph for a Spark Application execution. */
|
|
51
|
+
sparkPlanGraph?: SparkPlanGraph;
|
|
52
|
+
}
|
|
53
|
+
interface AccessSessionSparkApplicationStageAttemptResponse {
|
|
54
|
+
/** Output only. Data corresponding to a stage. */
|
|
55
|
+
stageData?: StageData;
|
|
56
|
+
}
|
|
57
|
+
interface AccessSessionSparkApplicationStageRddOperationGraphResponse {
|
|
58
|
+
/** RDD operation graph for a Spark Application Stage. */
|
|
59
|
+
rddOperationGraph?: RddOperationGraph;
|
|
60
|
+
}
|
|
61
|
+
interface AccessSparkApplicationEnvironmentInfoResponse {
|
|
62
|
+
/** Details about the Environment that the application is running in. */
|
|
63
|
+
applicationEnvironmentInfo?: ApplicationEnvironmentInfo;
|
|
64
|
+
}
|
|
65
|
+
interface AccessSparkApplicationJobResponse {
|
|
66
|
+
/** Output only. Data corresponding to a spark job. */
|
|
67
|
+
jobData?: JobData;
|
|
68
|
+
}
|
|
69
|
+
interface AccessSparkApplicationResponse {
|
|
70
|
+
/** Output only. High level information corresponding to an application. */
|
|
71
|
+
application?: ApplicationInfo;
|
|
72
|
+
}
|
|
73
|
+
interface AccessSparkApplicationSqlQueryResponse {
|
|
74
|
+
/** SQL Execution Data */
|
|
75
|
+
executionData?: SqlExecutionUiData;
|
|
76
|
+
}
|
|
77
|
+
interface AccessSparkApplicationSqlSparkPlanGraphResponse {
|
|
78
|
+
/** SparkPlanGraph for a Spark Application execution. */
|
|
79
|
+
sparkPlanGraph?: SparkPlanGraph;
|
|
80
|
+
}
|
|
81
|
+
interface AccessSparkApplicationStageAttemptResponse {
|
|
82
|
+
/** Output only. Data corresponding to a stage. */
|
|
83
|
+
stageData?: StageData;
|
|
84
|
+
}
|
|
85
|
+
interface AccessSparkApplicationStageRddOperationGraphResponse {
|
|
86
|
+
/** RDD operation graph for a Spark Application Stage. */
|
|
87
|
+
rddOperationGraph?: RddOperationGraph;
|
|
88
|
+
}
|
|
89
|
+
interface AccumulableInfo {
|
|
90
|
+
accumullableInfoId?: string;
|
|
91
|
+
name?: string;
|
|
92
|
+
update?: string;
|
|
93
|
+
value?: string;
|
|
94
|
+
}
|
|
33
95
|
interface AnalyzeBatchRequest {
|
|
34
96
|
/** Optional. A unique ID used to identify the request. If the service receives two AnalyzeBatchRequest (http://cloud/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.AnalyzeBatchRequest)s with the same request_id, the second request is ignored and the Operation that corresponds to the first request created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The value must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters. */
|
|
35
97
|
requestId?: string;
|
|
@@ -52,6 +114,40 @@ declare namespace gapi.client {
|
|
|
52
114
|
/** Output only. Warnings encountered during operation execution. */
|
|
53
115
|
warnings?: string[];
|
|
54
116
|
}
|
|
117
|
+
interface ApplicationAttemptInfo {
|
|
118
|
+
appSparkVersion?: string;
|
|
119
|
+
attemptId?: string;
|
|
120
|
+
completed?: boolean;
|
|
121
|
+
durationMillis?: string;
|
|
122
|
+
endTime?: string;
|
|
123
|
+
lastUpdated?: string;
|
|
124
|
+
sparkUser?: string;
|
|
125
|
+
startTime?: string;
|
|
126
|
+
}
|
|
127
|
+
interface ApplicationEnvironmentInfo {
|
|
128
|
+
classpathEntries?: {[P in string]: string};
|
|
129
|
+
hadoopProperties?: {[P in string]: string};
|
|
130
|
+
metricsProperties?: {[P in string]: string};
|
|
131
|
+
resourceProfiles?: ResourceProfileInfo[];
|
|
132
|
+
runtime?: SparkRuntimeInfo;
|
|
133
|
+
sparkProperties?: {[P in string]: string};
|
|
134
|
+
systemProperties?: {[P in string]: string};
|
|
135
|
+
}
|
|
136
|
+
interface ApplicationInfo {
|
|
137
|
+
applicationContextIngestionStatus?: string;
|
|
138
|
+
applicationId?: string;
|
|
139
|
+
attempts?: ApplicationAttemptInfo[];
|
|
140
|
+
coresGranted?: number;
|
|
141
|
+
coresPerExecutor?: number;
|
|
142
|
+
maxCores?: number;
|
|
143
|
+
memoryPerExecutorMb?: number;
|
|
144
|
+
name?: string;
|
|
145
|
+
quantileDataStatus?: string;
|
|
146
|
+
}
|
|
147
|
+
interface AppSummary {
|
|
148
|
+
numCompletedJobs?: number;
|
|
149
|
+
numCompletedStages?: number;
|
|
150
|
+
}
|
|
55
151
|
interface AutoscalingConfig {
|
|
56
152
|
/** Optional. The autoscaling policy used by the cluster.Only resource names including projectid and location (region) are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id] projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]Note that the policy must be in the same project and Dataproc region. */
|
|
57
153
|
policyUri?: string;
|
|
@@ -284,10 +380,33 @@ declare namespace gapi.client {
|
|
|
284
380
|
/** Output only. Additional state information that includes status reported by the agent. */
|
|
285
381
|
substate?: string;
|
|
286
382
|
}
|
|
383
|
+
interface ClusterToRepair {
|
|
384
|
+
/** Required. Repair action to take on the cluster resource. */
|
|
385
|
+
clusterRepairAction?: string;
|
|
386
|
+
}
|
|
287
387
|
interface ConfidentialInstanceConfig {
|
|
288
388
|
/** Optional. Defines whether the instance should have confidential compute enabled. */
|
|
289
389
|
enableConfidentialCompute?: boolean;
|
|
290
390
|
}
|
|
391
|
+
interface ConsolidatedExecutorSummary {
|
|
392
|
+
activeTasks?: number;
|
|
393
|
+
completedTasks?: number;
|
|
394
|
+
count?: number;
|
|
395
|
+
diskUsed?: string;
|
|
396
|
+
failedTasks?: number;
|
|
397
|
+
isExcluded?: number;
|
|
398
|
+
maxMemory?: string;
|
|
399
|
+
memoryMetrics?: MemoryMetrics;
|
|
400
|
+
memoryUsed?: string;
|
|
401
|
+
rddBlocks?: number;
|
|
402
|
+
totalCores?: number;
|
|
403
|
+
totalDurationMillis?: string;
|
|
404
|
+
totalGcTimeMillis?: string;
|
|
405
|
+
totalInputBytes?: string;
|
|
406
|
+
totalShuffleRead?: string;
|
|
407
|
+
totalShuffleWrite?: string;
|
|
408
|
+
totalTasks?: number;
|
|
409
|
+
}
|
|
291
410
|
interface DataprocMetricConfig {
|
|
292
411
|
/** Required. Metrics sources to enable. */
|
|
293
412
|
metrics?: Metric[];
|
|
@@ -369,6 +488,89 @@ declare namespace gapi.client {
|
|
|
369
488
|
/** Optional. The duration after which the workload will be terminated, specified as the JSON representation for Duration (https://protobuf.dev/programming-guides/proto3/#json). When the workload exceeds this duration, it will be unconditionally terminated without waiting for ongoing work to finish. If ttl is not specified for a batch workload, the workload will be allowed to run until it exits naturally (or run forever without exiting). If ttl is not specified for an interactive session, it defaults to 24 hours. If ttl is not specified for a batch that uses 2.1+ runtime version, it defaults to 4 hours. Minimum value is 10 minutes; maximum value is 14 days. If both ttl and idle_ttl are specified (for an interactive session), the conditions are treated as OR conditions: the workload will be terminated when it has been idle for idle_ttl or when ttl has been exceeded, whichever occurs first. */
|
|
370
489
|
ttl?: string;
|
|
371
490
|
}
|
|
491
|
+
interface ExecutorMetrics {
|
|
492
|
+
metrics?: {[P in string]: string};
|
|
493
|
+
}
|
|
494
|
+
interface ExecutorMetricsDistributions {
|
|
495
|
+
diskBytesSpilled?: number[];
|
|
496
|
+
failedTasks?: number[];
|
|
497
|
+
inputBytes?: number[];
|
|
498
|
+
inputRecords?: number[];
|
|
499
|
+
killedTasks?: number[];
|
|
500
|
+
memoryBytesSpilled?: number[];
|
|
501
|
+
outputBytes?: number[];
|
|
502
|
+
outputRecords?: number[];
|
|
503
|
+
peakMemoryMetrics?: ExecutorPeakMetricsDistributions;
|
|
504
|
+
quantiles?: number[];
|
|
505
|
+
shuffleRead?: number[];
|
|
506
|
+
shuffleReadRecords?: number[];
|
|
507
|
+
shuffleWrite?: number[];
|
|
508
|
+
shuffleWriteRecords?: number[];
|
|
509
|
+
succeededTasks?: number[];
|
|
510
|
+
taskTimeMillis?: number[];
|
|
511
|
+
}
|
|
512
|
+
interface ExecutorPeakMetricsDistributions {
|
|
513
|
+
executorMetrics?: ExecutorMetrics[];
|
|
514
|
+
quantiles?: number[];
|
|
515
|
+
}
|
|
516
|
+
interface ExecutorResourceRequest {
|
|
517
|
+
amount?: string;
|
|
518
|
+
discoveryScript?: string;
|
|
519
|
+
resourceName?: string;
|
|
520
|
+
vendor?: string;
|
|
521
|
+
}
|
|
522
|
+
interface ExecutorStageSummary {
|
|
523
|
+
diskBytesSpilled?: string;
|
|
524
|
+
executorId?: string;
|
|
525
|
+
failedTasks?: number;
|
|
526
|
+
inputBytes?: string;
|
|
527
|
+
inputRecords?: string;
|
|
528
|
+
isExcludedForStage?: boolean;
|
|
529
|
+
killedTasks?: number;
|
|
530
|
+
memoryBytesSpilled?: string;
|
|
531
|
+
outputBytes?: string;
|
|
532
|
+
outputRecords?: string;
|
|
533
|
+
peakMemoryMetrics?: ExecutorMetrics;
|
|
534
|
+
shuffleRead?: string;
|
|
535
|
+
shuffleReadRecords?: string;
|
|
536
|
+
shuffleWrite?: string;
|
|
537
|
+
shuffleWriteRecords?: string;
|
|
538
|
+
stageAttemptId?: number;
|
|
539
|
+
stageId?: string;
|
|
540
|
+
succeededTasks?: number;
|
|
541
|
+
taskTimeMillis?: string;
|
|
542
|
+
}
|
|
543
|
+
interface ExecutorSummary {
|
|
544
|
+
activeTasks?: number;
|
|
545
|
+
addTime?: string;
|
|
546
|
+
attributes?: {[P in string]: string};
|
|
547
|
+
completedTasks?: number;
|
|
548
|
+
diskUsed?: string;
|
|
549
|
+
excludedInStages?: string[];
|
|
550
|
+
executorId?: string;
|
|
551
|
+
executorLogs?: {[P in string]: string};
|
|
552
|
+
failedTasks?: number;
|
|
553
|
+
hostPort?: string;
|
|
554
|
+
isActive?: boolean;
|
|
555
|
+
isExcluded?: boolean;
|
|
556
|
+
maxMemory?: string;
|
|
557
|
+
maxTasks?: number;
|
|
558
|
+
memoryMetrics?: MemoryMetrics;
|
|
559
|
+
memoryUsed?: string;
|
|
560
|
+
peakMemoryMetrics?: ExecutorMetrics;
|
|
561
|
+
rddBlocks?: number;
|
|
562
|
+
removeReason?: string;
|
|
563
|
+
removeTime?: string;
|
|
564
|
+
resourceProfileId?: number;
|
|
565
|
+
resources?: {[P in string]: ResourceInformation};
|
|
566
|
+
totalCores?: number;
|
|
567
|
+
totalDurationMillis?: string;
|
|
568
|
+
totalGcTimeMillis?: string;
|
|
569
|
+
totalInputBytes?: string;
|
|
570
|
+
totalShuffleRead?: string;
|
|
571
|
+
totalShuffleWrite?: string;
|
|
572
|
+
totalTasks?: number;
|
|
573
|
+
}
|
|
372
574
|
interface Expr {
|
|
373
575
|
/** Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. */
|
|
374
576
|
description?: string;
|
|
@@ -390,7 +592,7 @@ declare namespace gapi.client {
|
|
|
390
592
|
mainClass?: string;
|
|
391
593
|
/** The HCFS URI of the jar file that contains the main class. */
|
|
392
594
|
mainJarFileUri?: string;
|
|
393
|
-
/** Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might
|
|
595
|
+
/** Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/flink/conf/flink-defaults.conf and classes in user code. */
|
|
394
596
|
properties?: {[P in string]: string};
|
|
395
597
|
/** Optional. HCFS URI of the savepoint, which contains the last saved progress for starting the current job. */
|
|
396
598
|
savepointUri?: string;
|
|
@@ -531,6 +733,14 @@ declare namespace gapi.client {
|
|
|
531
733
|
/** Required. The encrypted credentials being injected in to the cluster.The client is responsible for encrypting the credentials in a way that is supported by the cluster.A wrapped value is used here so that the actual contents of the encrypted credentials are not written to audit logs. */
|
|
532
734
|
credentialsCiphertext?: string;
|
|
533
735
|
}
|
|
736
|
+
interface InputMetrics {
|
|
737
|
+
bytesRead?: string;
|
|
738
|
+
recordsRead?: string;
|
|
739
|
+
}
|
|
740
|
+
interface InputQuantileMetrics {
|
|
741
|
+
bytesRead?: Quantiles;
|
|
742
|
+
recordsRead?: Quantiles;
|
|
743
|
+
}
|
|
534
744
|
interface InstanceFlexibilityPolicy {
|
|
535
745
|
/** Optional. List of instance selection options that the group will use when creating new VMs. */
|
|
536
746
|
instanceSelectionList?: InstanceSelection[];
|
|
@@ -657,6 +867,30 @@ declare namespace gapi.client {
|
|
|
657
867
|
/** Output only. The collection of YARN applications spun up by this job.Beta Feature: This report is available for testing purposes only. It might be changed before final release. */
|
|
658
868
|
yarnApplications?: YarnApplication[];
|
|
659
869
|
}
|
|
870
|
+
interface JobData {
|
|
871
|
+
completionTime?: string;
|
|
872
|
+
description?: string;
|
|
873
|
+
jobGroup?: string;
|
|
874
|
+
jobId?: string;
|
|
875
|
+
killTasksSummary?: {[P in string]: number};
|
|
876
|
+
name?: string;
|
|
877
|
+
numActiveStages?: number;
|
|
878
|
+
numActiveTasks?: number;
|
|
879
|
+
numCompletedIndices?: number;
|
|
880
|
+
numCompletedStages?: number;
|
|
881
|
+
numCompletedTasks?: number;
|
|
882
|
+
numFailedStages?: number;
|
|
883
|
+
numFailedTasks?: number;
|
|
884
|
+
numKilledTasks?: number;
|
|
885
|
+
numSkippedStages?: number;
|
|
886
|
+
numSkippedTasks?: number;
|
|
887
|
+
numTasks?: number;
|
|
888
|
+
skippedStages?: number[];
|
|
889
|
+
sqlExecutionId?: string;
|
|
890
|
+
stageIds?: string[];
|
|
891
|
+
status?: string;
|
|
892
|
+
submissionTime?: string;
|
|
893
|
+
}
|
|
660
894
|
interface JobMetadata {
|
|
661
895
|
/** Output only. The job id. */
|
|
662
896
|
jobId?: string;
|
|
@@ -687,6 +921,20 @@ declare namespace gapi.client {
|
|
|
687
921
|
/** Optional. Maximum total number of times a driver can be restarted as a result of the driver exiting with a non-zero code. After the maximum number is reached, the job will be reported as failed.Maximum value is 240.Note: Currently, this restartable job option is not supported in Dataproc workflow templates (https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template). */
|
|
688
922
|
maxFailuresTotal?: number;
|
|
689
923
|
}
|
|
924
|
+
interface JobsSummary {
|
|
925
|
+
/** Number of active jobs */
|
|
926
|
+
activeJobs?: number;
|
|
927
|
+
/** Spark Application Id */
|
|
928
|
+
applicationId?: string;
|
|
929
|
+
/** Attempts info */
|
|
930
|
+
attempts?: ApplicationAttemptInfo[];
|
|
931
|
+
/** Number of completed jobs */
|
|
932
|
+
completedJobs?: number;
|
|
933
|
+
/** Number of failed jobs */
|
|
934
|
+
failedJobs?: number;
|
|
935
|
+
/** Spark Scheduling mode */
|
|
936
|
+
schedulingMode?: string;
|
|
937
|
+
}
|
|
690
938
|
interface JobStatus {
|
|
691
939
|
/** Optional. Output only. Job state details, such as an error description if the state is ERROR. */
|
|
692
940
|
details?: string;
|
|
@@ -833,6 +1081,12 @@ declare namespace gapi.client {
|
|
|
833
1081
|
/** Output only. The name of the Instance Template used for the Managed Instance Group. */
|
|
834
1082
|
instanceTemplateName?: string;
|
|
835
1083
|
}
|
|
1084
|
+
interface MemoryMetrics {
|
|
1085
|
+
totalOffHeapStorageMemory?: string;
|
|
1086
|
+
totalOnHeapStorageMemory?: string;
|
|
1087
|
+
usedOffHeapStorageMemory?: string;
|
|
1088
|
+
usedOnHeapStorageMemory?: string;
|
|
1089
|
+
}
|
|
836
1090
|
interface MetastoreConfig {
|
|
837
1091
|
/** Required. Resource name of an existing Dataproc Metastore service.Example: projects/[project_id]/locations/[dataproc_region]/services/[service-name] */
|
|
838
1092
|
dataprocMetastoreService?: string;
|
|
@@ -937,6 +1191,14 @@ declare namespace gapi.client {
|
|
|
937
1191
|
/** Optional. Job is a Trino job. */
|
|
938
1192
|
trinoJob?: TrinoJob;
|
|
939
1193
|
}
|
|
1194
|
+
interface OutputMetrics {
|
|
1195
|
+
bytesWritten?: string;
|
|
1196
|
+
recordsWritten?: string;
|
|
1197
|
+
}
|
|
1198
|
+
interface OutputQuantileMetrics {
|
|
1199
|
+
bytesWritten?: Quantiles;
|
|
1200
|
+
recordsWritten?: Quantiles;
|
|
1201
|
+
}
|
|
940
1202
|
interface ParameterValidation {
|
|
941
1203
|
/** Validation based on regular expressions. */
|
|
942
1204
|
regex?: RegexValidation;
|
|
@@ -973,6 +1235,10 @@ declare namespace gapi.client {
|
|
|
973
1235
|
/** Specifies the format of the policy.Valid values are 0, 1, and 3. Requests that specify an invalid value are rejected.Any operation that affects conditional role bindings must specify version 3. This requirement applies to the following operations: Getting a policy that includes a conditional role binding Adding a conditional role binding to a policy Changing a conditional role binding in a policy Removing any role binding, with or without a condition, from a policy that includes conditionsImportant: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies). */
|
|
974
1236
|
version?: number;
|
|
975
1237
|
}
|
|
1238
|
+
interface PoolData {
|
|
1239
|
+
name?: string;
|
|
1240
|
+
stageIds?: string[];
|
|
1241
|
+
}
|
|
976
1242
|
interface PrestoJob {
|
|
977
1243
|
/** Optional. Presto client tags to attach to this query */
|
|
978
1244
|
clientTags?: string[];
|
|
@@ -989,6 +1255,15 @@ declare namespace gapi.client {
|
|
|
989
1255
|
/** A list of queries. */
|
|
990
1256
|
queryList?: QueryList;
|
|
991
1257
|
}
|
|
1258
|
+
interface ProcessSummary {
|
|
1259
|
+
addTime?: string;
|
|
1260
|
+
hostPort?: string;
|
|
1261
|
+
isActive?: boolean;
|
|
1262
|
+
processId?: string;
|
|
1263
|
+
processLogs?: {[P in string]: string};
|
|
1264
|
+
removeTime?: string;
|
|
1265
|
+
totalCores?: number;
|
|
1266
|
+
}
|
|
992
1267
|
interface PyPiRepositoryConfig {
|
|
993
1268
|
/** Optional. PyPi repository address */
|
|
994
1269
|
pypiRepository?: string;
|
|
@@ -1025,15 +1300,79 @@ declare namespace gapi.client {
|
|
|
1025
1300
|
/** Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip. */
|
|
1026
1301
|
pythonFileUris?: string[];
|
|
1027
1302
|
}
|
|
1303
|
+
interface Quantiles {
|
|
1304
|
+
count?: string;
|
|
1305
|
+
maximum?: string;
|
|
1306
|
+
minimum?: string;
|
|
1307
|
+
percentile25?: string;
|
|
1308
|
+
percentile50?: string;
|
|
1309
|
+
percentile75?: string;
|
|
1310
|
+
sum?: string;
|
|
1311
|
+
}
|
|
1028
1312
|
interface QueryList {
|
|
1029
1313
|
/** Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } } */
|
|
1030
1314
|
queries?: string[];
|
|
1031
1315
|
}
|
|
1316
|
+
interface RddDataDistribution {
|
|
1317
|
+
address?: string;
|
|
1318
|
+
diskUsed?: string;
|
|
1319
|
+
memoryRemaining?: string;
|
|
1320
|
+
memoryUsed?: string;
|
|
1321
|
+
offHeapMemoryRemaining?: string;
|
|
1322
|
+
offHeapMemoryUsed?: string;
|
|
1323
|
+
onHeapMemoryRemaining?: string;
|
|
1324
|
+
onHeapMemoryUsed?: string;
|
|
1325
|
+
}
|
|
1326
|
+
interface RddOperationCluster {
|
|
1327
|
+
childClusters?: RddOperationCluster[];
|
|
1328
|
+
childNodes?: RddOperationNode[];
|
|
1329
|
+
name?: string;
|
|
1330
|
+
rddClusterId?: string;
|
|
1331
|
+
}
|
|
1332
|
+
interface RddOperationEdge {
|
|
1333
|
+
fromId?: number;
|
|
1334
|
+
toId?: number;
|
|
1335
|
+
}
|
|
1336
|
+
interface RddOperationGraph {
|
|
1337
|
+
edges?: RddOperationEdge[];
|
|
1338
|
+
incomingEdges?: RddOperationEdge[];
|
|
1339
|
+
outgoingEdges?: RddOperationEdge[];
|
|
1340
|
+
rootCluster?: RddOperationCluster;
|
|
1341
|
+
stageId?: string;
|
|
1342
|
+
}
|
|
1343
|
+
interface RddOperationNode {
|
|
1344
|
+
barrier?: boolean;
|
|
1345
|
+
cached?: boolean;
|
|
1346
|
+
callsite?: string;
|
|
1347
|
+
name?: string;
|
|
1348
|
+
nodeId?: number;
|
|
1349
|
+
outputDeterministicLevel?: string;
|
|
1350
|
+
}
|
|
1351
|
+
interface RddPartitionInfo {
|
|
1352
|
+
blockName?: string;
|
|
1353
|
+
diskUsed?: string;
|
|
1354
|
+
executors?: string[];
|
|
1355
|
+
memoryUsed?: string;
|
|
1356
|
+
storageLevel?: string;
|
|
1357
|
+
}
|
|
1358
|
+
interface RddStorageInfo {
|
|
1359
|
+
dataDistribution?: RddDataDistribution[];
|
|
1360
|
+
diskUsed?: string;
|
|
1361
|
+
memoryUsed?: string;
|
|
1362
|
+
name?: string;
|
|
1363
|
+
numCachedPartitions?: number;
|
|
1364
|
+
numPartitions?: number;
|
|
1365
|
+
partitions?: RddPartitionInfo[];
|
|
1366
|
+
rddStorageId?: number;
|
|
1367
|
+
storageLevel?: string;
|
|
1368
|
+
}
|
|
1032
1369
|
interface RegexValidation {
|
|
1033
1370
|
/** Required. RE2 regular expressions used to validate the parameter's value. The value must match the regex in its entirety (substring matches are not sufficient). */
|
|
1034
1371
|
regexes?: string[];
|
|
1035
1372
|
}
|
|
1036
1373
|
interface RepairClusterRequest {
|
|
1374
|
+
/** Optional. Cluster to be repaired */
|
|
1375
|
+
cluster?: ClusterToRepair;
|
|
1037
1376
|
/** Optional. Specifying the cluster_uuid means the RPC will fail (with error NOT_FOUND) if a cluster with the specified UUID does not exist. */
|
|
1038
1377
|
clusterUuid?: string;
|
|
1039
1378
|
/** Optional. Timeout for graceful YARN decommissioning. Graceful decommissioning facilitates the removal of cluster nodes without interrupting jobs in progress. The timeout specifies the amount of time to wait for jobs finish before forcefully removing nodes. The default timeout is 0 for forceful decommissioning, and the maximum timeout period is 1 day. (see JSON Mapping—Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)).graceful_decommission_timeout is supported in Dataproc image versions 1.2+. */
|
|
@@ -1075,6 +1414,15 @@ declare namespace gapi.client {
|
|
|
1075
1414
|
/** Required. The number of running instances for the node group to maintain. The group adds or removes instances to maintain the number of instances specified by this parameter. */
|
|
1076
1415
|
size?: number;
|
|
1077
1416
|
}
|
|
1417
|
+
interface ResourceInformation {
|
|
1418
|
+
addresses?: string[];
|
|
1419
|
+
name?: string;
|
|
1420
|
+
}
|
|
1421
|
+
interface ResourceProfileInfo {
|
|
1422
|
+
executorResources?: {[P in string]: ExecutorResourceRequest};
|
|
1423
|
+
resourceProfileId?: number;
|
|
1424
|
+
taskResources?: {[P in string]: TaskResourceRequest};
|
|
1425
|
+
}
|
|
1078
1426
|
interface RuntimeConfig {
|
|
1079
1427
|
/** Optional. Autotuning configuration of the workload. */
|
|
1080
1428
|
autotuningConfig?: AutotuningConfig;
|
|
@@ -1101,6 +1449,102 @@ declare namespace gapi.client {
|
|
|
1101
1449
|
/** Output only. A URI pointing to the location of the stdout and stderr of the workload. */
|
|
1102
1450
|
outputUri?: string;
|
|
1103
1451
|
}
|
|
1452
|
+
interface SearchSessionSparkApplicationExecutorsResponse {
|
|
1453
|
+
/** This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSessionSparkApplicationExecutorsRequest. */
|
|
1454
|
+
nextPageToken?: string;
|
|
1455
|
+
/** Details about executors used by the application. */
|
|
1456
|
+
sparkApplicationExecutors?: ExecutorSummary[];
|
|
1457
|
+
}
|
|
1458
|
+
interface SearchSessionSparkApplicationExecutorStageSummaryResponse {
|
|
1459
|
+
/** This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSessionSparkApplicationExecutorStageSummaryRequest. */
|
|
1460
|
+
nextPageToken?: string;
|
|
1461
|
+
/** Details about executors used by the application stage. */
|
|
1462
|
+
sparkApplicationStageExecutors?: ExecutorStageSummary[];
|
|
1463
|
+
}
|
|
1464
|
+
interface SearchSessionSparkApplicationJobsResponse {
|
|
1465
|
+
/** This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSessionSparkApplicationJobsRequest. */
|
|
1466
|
+
nextPageToken?: string;
|
|
1467
|
+
/** Output only. Data corresponding to a spark job. */
|
|
1468
|
+
sparkApplicationJobs?: JobData[];
|
|
1469
|
+
}
|
|
1470
|
+
interface SearchSessionSparkApplicationSqlQueriesResponse {
|
|
1471
|
+
/** This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSessionSparkApplicationSqlQueriesRequest. */
|
|
1472
|
+
nextPageToken?: string;
|
|
1473
|
+
/** Output only. SQL Execution Data */
|
|
1474
|
+
sparkApplicationSqlQueries?: SqlExecutionUiData[];
|
|
1475
|
+
}
|
|
1476
|
+
interface SearchSessionSparkApplicationsResponse {
|
|
1477
|
+
/** This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSessionSparkApplicationsRequest. */
|
|
1478
|
+
nextPageToken?: string;
|
|
1479
|
+
/** Output only. High level information corresponding to an application. */
|
|
1480
|
+
sparkApplications?: SparkApplication[];
|
|
1481
|
+
}
|
|
1482
|
+
interface SearchSessionSparkApplicationStageAttemptsResponse {
|
|
1483
|
+
/** This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSessionSparkApplicationStageAttemptsRequest. */
|
|
1484
|
+
nextPageToken?: string;
|
|
1485
|
+
/** Output only. Data corresponding to a stage attempts */
|
|
1486
|
+
sparkApplicationStageAttempts?: StageData[];
|
|
1487
|
+
}
|
|
1488
|
+
interface SearchSessionSparkApplicationStageAttemptTasksResponse {
|
|
1489
|
+
/** This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSessionSparkApplicationStageAttemptTasksRequest. */
|
|
1490
|
+
nextPageToken?: string;
|
|
1491
|
+
/** Output only. Data corresponding to tasks created by spark. */
|
|
1492
|
+
sparkApplicationStageAttemptTasks?: TaskData[];
|
|
1493
|
+
}
|
|
1494
|
+
interface SearchSessionSparkApplicationStagesResponse {
|
|
1495
|
+
/** This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSessionSparkApplicationStages. */
|
|
1496
|
+
nextPageToken?: string;
|
|
1497
|
+
/** Output only. Data corresponding to a stage. */
|
|
1498
|
+
sparkApplicationStages?: StageData[];
|
|
1499
|
+
}
|
|
1500
|
+
interface SearchSparkApplicationExecutorsResponse {
|
|
1501
|
+
/** This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSparkApplicationExecutorsListRequest. */
|
|
1502
|
+
nextPageToken?: string;
|
|
1503
|
+
/** Details about executors used by the application. */
|
|
1504
|
+
sparkApplicationExecutors?: ExecutorSummary[];
|
|
1505
|
+
}
|
|
1506
|
+
interface SearchSparkApplicationExecutorStageSummaryResponse {
|
|
1507
|
+
/** This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSparkApplicationExecutorsListRequest. */
|
|
1508
|
+
nextPageToken?: string;
|
|
1509
|
+
/** Details about executors used by the application stage. */
|
|
1510
|
+
sparkApplicationStageExecutors?: ExecutorStageSummary[];
|
|
1511
|
+
}
|
|
1512
|
+
interface SearchSparkApplicationJobsResponse {
|
|
1513
|
+
/** This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSparkApplicationJobsRequest. */
|
|
1514
|
+
nextPageToken?: string;
|
|
1515
|
+
/** Output only. Data corresponding to a spark job. */
|
|
1516
|
+
sparkApplicationJobs?: JobData[];
|
|
1517
|
+
}
|
|
1518
|
+
interface SearchSparkApplicationSqlQueriesResponse {
|
|
1519
|
+
/** This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSparkApplicationSqlQueriesRequest. */
|
|
1520
|
+
nextPageToken?: string;
|
|
1521
|
+
/** Output only. SQL Execution Data */
|
|
1522
|
+
sparkApplicationSqlQueries?: SqlExecutionUiData[];
|
|
1523
|
+
}
|
|
1524
|
+
interface SearchSparkApplicationsResponse {
|
|
1525
|
+
/** This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSparkApplicationsRequest. */
|
|
1526
|
+
nextPageToken?: string;
|
|
1527
|
+
/** Output only. High level information corresponding to an application. */
|
|
1528
|
+
sparkApplications?: SparkApplication[];
|
|
1529
|
+
}
|
|
1530
|
+
interface SearchSparkApplicationStageAttemptsResponse {
|
|
1531
|
+
/** This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent ListSparkApplicationStageAttemptsRequest. */
|
|
1532
|
+
nextPageToken?: string;
|
|
1533
|
+
/** Output only. Data corresponding to a stage attempts */
|
|
1534
|
+
sparkApplicationStageAttempts?: StageData[];
|
|
1535
|
+
}
|
|
1536
|
+
interface SearchSparkApplicationStageAttemptTasksResponse {
|
|
1537
|
+
/** This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent ListSparkApplicationStageAttemptTasksRequest. */
|
|
1538
|
+
nextPageToken?: string;
|
|
1539
|
+
/** Output only. Data corresponding to tasks created by spark. */
|
|
1540
|
+
sparkApplicationStageAttemptTasks?: TaskData[];
|
|
1541
|
+
}
|
|
1542
|
+
interface SearchSparkApplicationStagesResponse {
|
|
1543
|
+
/** This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSparkApplicationStages. */
|
|
1544
|
+
nextPageToken?: string;
|
|
1545
|
+
/** Output only. Data corresponding to a stage. */
|
|
1546
|
+
sparkApplicationStages?: StageData[];
|
|
1547
|
+
}
|
|
1104
1548
|
interface SecurityConfig {
|
|
1105
1549
|
/** Optional. Identity related configuration, including service account based secure multi-tenancy user mappings. */
|
|
1106
1550
|
identityConfig?: IdentityConfig;
|
|
@@ -1203,6 +1647,66 @@ declare namespace gapi.client {
|
|
|
1203
1647
|
/** Optional. Defines whether instances have the vTPM enabled. */
|
|
1204
1648
|
enableVtpm?: boolean;
|
|
1205
1649
|
}
|
|
1650
|
+
interface ShufflePushReadMetrics {
|
|
1651
|
+
corruptMergedBlockChunks?: string;
|
|
1652
|
+
localMergedBlocksFetched?: string;
|
|
1653
|
+
localMergedBytesRead?: string;
|
|
1654
|
+
localMergedChunksFetched?: string;
|
|
1655
|
+
mergedFetchFallbackCount?: string;
|
|
1656
|
+
remoteMergedBlocksFetched?: string;
|
|
1657
|
+
remoteMergedBytesRead?: string;
|
|
1658
|
+
remoteMergedChunksFetched?: string;
|
|
1659
|
+
remoteMergedReqsDuration?: string;
|
|
1660
|
+
}
|
|
1661
|
+
interface ShufflePushReadQuantileMetrics {
|
|
1662
|
+
corruptMergedBlockChunks?: Quantiles;
|
|
1663
|
+
localMergedBlocksFetched?: Quantiles;
|
|
1664
|
+
localMergedBytesRead?: Quantiles;
|
|
1665
|
+
localMergedChunksFetched?: Quantiles;
|
|
1666
|
+
mergedFetchFallbackCount?: Quantiles;
|
|
1667
|
+
remoteMergedBlocksFetched?: Quantiles;
|
|
1668
|
+
remoteMergedBytesRead?: Quantiles;
|
|
1669
|
+
remoteMergedChunksFetched?: Quantiles;
|
|
1670
|
+
remoteMergedReqsDuration?: Quantiles;
|
|
1671
|
+
}
|
|
1672
|
+
interface ShuffleReadMetrics {
|
|
1673
|
+
fetchWaitTimeMillis?: string;
|
|
1674
|
+
localBlocksFetched?: string;
|
|
1675
|
+
localBytesRead?: string;
|
|
1676
|
+
recordsRead?: string;
|
|
1677
|
+
remoteBlocksFetched?: string;
|
|
1678
|
+
remoteBytesRead?: string;
|
|
1679
|
+
remoteBytesReadToDisk?: string;
|
|
1680
|
+
remoteReqsDuration?: string;
|
|
1681
|
+
shufflePushReadMetrics?: ShufflePushReadMetrics;
|
|
1682
|
+
}
|
|
1683
|
+
interface ShuffleReadQuantileMetrics {
|
|
1684
|
+
fetchWaitTimeMillis?: Quantiles;
|
|
1685
|
+
localBlocksFetched?: Quantiles;
|
|
1686
|
+
readBytes?: Quantiles;
|
|
1687
|
+
readRecords?: Quantiles;
|
|
1688
|
+
remoteBlocksFetched?: Quantiles;
|
|
1689
|
+
remoteBytesRead?: Quantiles;
|
|
1690
|
+
remoteBytesReadToDisk?: Quantiles;
|
|
1691
|
+
remoteReqsDuration?: Quantiles;
|
|
1692
|
+
shufflePushReadMetrics?: ShufflePushReadQuantileMetrics;
|
|
1693
|
+
totalBlocksFetched?: Quantiles;
|
|
1694
|
+
}
|
|
1695
|
+
interface ShuffleWriteMetrics {
|
|
1696
|
+
bytesWritten?: string;
|
|
1697
|
+
recordsWritten?: string;
|
|
1698
|
+
writeTimeNanos?: string;
|
|
1699
|
+
}
|
|
1700
|
+
interface ShuffleWriteQuantileMetrics {
|
|
1701
|
+
writeBytes?: Quantiles;
|
|
1702
|
+
writeRecords?: Quantiles;
|
|
1703
|
+
writeTimeNanos?: Quantiles;
|
|
1704
|
+
}
|
|
1705
|
+
interface SinkProgress {
|
|
1706
|
+
description?: string;
|
|
1707
|
+
metrics?: {[P in string]: string};
|
|
1708
|
+
numOutputRows?: string;
|
|
1709
|
+
}
|
|
1206
1710
|
interface SoftwareConfig {
|
|
1207
1711
|
/** Optional. The version of software inside the cluster. It must be one of the supported Dataproc Versions (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported-dataproc-image-versions), such as "1.2" (including a subminor version, such as "1.2.29"), or the "preview" version (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). If unspecified, it defaults to the latest Debian version. */
|
|
1208
1712
|
imageVersion?: string;
|
|
@@ -1211,6 +1715,22 @@ declare namespace gapi.client {
|
|
|
1211
1715
|
/** Optional. The properties to set on daemon config files.Property keys are specified in prefix:property format, for example core:hadoop.tmp.dir. The following are supported prefixes and their mappings: capacity-scheduler: capacity-scheduler.xml core: core-site.xml distcp: distcp-default.xml hdfs: hdfs-site.xml hive: hive-site.xml mapred: mapred-site.xml pig: pig.properties spark: spark-defaults.conf yarn: yarn-site.xmlFor more information, see Cluster properties (https://cloud.google.com/dataproc/docs/concepts/cluster-properties). */
|
|
1212
1716
|
properties?: {[P in string]: string};
|
|
1213
1717
|
}
|
|
1718
|
+
interface SourceProgress {
|
|
1719
|
+
description?: string;
|
|
1720
|
+
endOffset?: string;
|
|
1721
|
+
inputRowsPerSecond?: number;
|
|
1722
|
+
latestOffset?: string;
|
|
1723
|
+
metrics?: {[P in string]: string};
|
|
1724
|
+
numInputRows?: string;
|
|
1725
|
+
processedRowsPerSecond?: number;
|
|
1726
|
+
startOffset?: string;
|
|
1727
|
+
}
|
|
1728
|
+
interface SparkApplication {
|
|
1729
|
+
/** Output only. High level information corresponding to an application. */
|
|
1730
|
+
application?: ApplicationInfo;
|
|
1731
|
+
/** Identifier. Name of the spark application */
|
|
1732
|
+
name?: string;
|
|
1733
|
+
}
|
|
1214
1734
|
interface SparkBatch {
|
|
1215
1735
|
/** Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. */
|
|
1216
1736
|
archiveUris?: string[];
|
|
@@ -1248,6 +1768,32 @@ declare namespace gapi.client {
|
|
|
1248
1768
|
/** Optional. A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code. */
|
|
1249
1769
|
properties?: {[P in string]: string};
|
|
1250
1770
|
}
|
|
1771
|
+
interface SparkPlanGraph {
|
|
1772
|
+
edges?: SparkPlanGraphEdge[];
|
|
1773
|
+
executionId?: string;
|
|
1774
|
+
nodes?: SparkPlanGraphNodeWrapper[];
|
|
1775
|
+
}
|
|
1776
|
+
interface SparkPlanGraphCluster {
|
|
1777
|
+
desc?: string;
|
|
1778
|
+
metrics?: SqlPlanMetric[];
|
|
1779
|
+
name?: string;
|
|
1780
|
+
nodes?: SparkPlanGraphNodeWrapper[];
|
|
1781
|
+
sparkPlanGraphClusterId?: string;
|
|
1782
|
+
}
|
|
1783
|
+
interface SparkPlanGraphEdge {
|
|
1784
|
+
fromId?: string;
|
|
1785
|
+
toId?: string;
|
|
1786
|
+
}
|
|
1787
|
+
interface SparkPlanGraphNode {
|
|
1788
|
+
desc?: string;
|
|
1789
|
+
metrics?: SqlPlanMetric[];
|
|
1790
|
+
name?: string;
|
|
1791
|
+
sparkPlanGraphNodeId?: string;
|
|
1792
|
+
}
|
|
1793
|
+
interface SparkPlanGraphNodeWrapper {
|
|
1794
|
+
cluster?: SparkPlanGraphCluster;
|
|
1795
|
+
node?: SparkPlanGraphNode;
|
|
1796
|
+
}
|
|
1251
1797
|
interface SparkRBatch {
|
|
1252
1798
|
/** Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. */
|
|
1253
1799
|
archiveUris?: string[];
|
|
@@ -1272,6 +1818,11 @@ declare namespace gapi.client {
|
|
|
1272
1818
|
/** Optional. A mapping of property names to values, used to configure SparkR. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code. */
|
|
1273
1819
|
properties?: {[P in string]: string};
|
|
1274
1820
|
}
|
|
1821
|
+
interface SparkRuntimeInfo {
|
|
1822
|
+
javaHome?: string;
|
|
1823
|
+
javaVersion?: string;
|
|
1824
|
+
scalaVersion?: string;
|
|
1825
|
+
}
|
|
1275
1826
|
interface SparkSqlBatch {
|
|
1276
1827
|
/** Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH. */
|
|
1277
1828
|
jarFileUris?: string[];
|
|
@@ -1308,6 +1859,168 @@ declare namespace gapi.client {
|
|
|
1308
1859
|
/** Optional. Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change.Bounds: 0.0, 1.0. Default: 0.0. */
|
|
1309
1860
|
scaleUpMinWorkerFraction?: number;
|
|
1310
1861
|
}
|
|
1862
|
+
interface SparkWrapperObject {
|
|
1863
|
+
applicationEnvironmentInfo?: ApplicationEnvironmentInfo;
|
|
1864
|
+
/** Application Id created by Spark. */
|
|
1865
|
+
applicationId?: string;
|
|
1866
|
+
applicationInfo?: ApplicationInfo;
|
|
1867
|
+
appSummary?: AppSummary;
|
|
1868
|
+
/** VM Timestamp associated with the data object. */
|
|
1869
|
+
eventTimestamp?: string;
|
|
1870
|
+
executorStageSummary?: ExecutorStageSummary;
|
|
1871
|
+
executorSummary?: ExecutorSummary;
|
|
1872
|
+
jobData?: JobData;
|
|
1873
|
+
poolData?: PoolData;
|
|
1874
|
+
processSummary?: ProcessSummary;
|
|
1875
|
+
rddOperationGraph?: RddOperationGraph;
|
|
1876
|
+
rddStorageInfo?: RddStorageInfo;
|
|
1877
|
+
resourceProfileInfo?: ResourceProfileInfo;
|
|
1878
|
+
sparkPlanGraph?: SparkPlanGraph;
|
|
1879
|
+
speculationStageSummary?: SpeculationStageSummary;
|
|
1880
|
+
sqlExecutionUiData?: SqlExecutionUiData;
|
|
1881
|
+
stageData?: StageData;
|
|
1882
|
+
streamBlockData?: StreamBlockData;
|
|
1883
|
+
streamingQueryData?: StreamingQueryData;
|
|
1884
|
+
streamingQueryProgress?: StreamingQueryProgress;
|
|
1885
|
+
taskData?: TaskData;
|
|
1886
|
+
}
|
|
1887
|
+
interface SpeculationStageSummary {
|
|
1888
|
+
numActiveTasks?: number;
|
|
1889
|
+
numCompletedTasks?: number;
|
|
1890
|
+
numFailedTasks?: number;
|
|
1891
|
+
numKilledTasks?: number;
|
|
1892
|
+
numTasks?: number;
|
|
1893
|
+
stageAttemptId?: number;
|
|
1894
|
+
stageId?: string;
|
|
1895
|
+
}
|
|
1896
|
+
interface SqlExecutionUiData {
|
|
1897
|
+
completionTime?: string;
|
|
1898
|
+
description?: string;
|
|
1899
|
+
details?: string;
|
|
1900
|
+
errorMessage?: string;
|
|
1901
|
+
executionId?: string;
|
|
1902
|
+
jobs?: {[P in string]: string};
|
|
1903
|
+
metrics?: SqlPlanMetric[];
|
|
1904
|
+
metricValues?: {[P in string]: string};
|
|
1905
|
+
metricValuesIsNull?: boolean;
|
|
1906
|
+
modifiedConfigs?: {[P in string]: string};
|
|
1907
|
+
physicalPlanDescription?: string;
|
|
1908
|
+
rootExecutionId?: string;
|
|
1909
|
+
stages?: string[];
|
|
1910
|
+
submissionTime?: string;
|
|
1911
|
+
}
|
|
1912
|
+
interface SqlPlanMetric {
|
|
1913
|
+
accumulatorId?: string;
|
|
1914
|
+
metricType?: string;
|
|
1915
|
+
name?: string;
|
|
1916
|
+
}
|
|
1917
|
+
interface StageAttemptTasksSummary {
|
|
1918
|
+
applicationId?: string;
|
|
1919
|
+
numFailedTasks?: number;
|
|
1920
|
+
numKilledTasks?: number;
|
|
1921
|
+
numPendingTasks?: number;
|
|
1922
|
+
numRunningTasks?: number;
|
|
1923
|
+
numSuccessTasks?: number;
|
|
1924
|
+
numTasks?: number;
|
|
1925
|
+
stageAttemptId?: number;
|
|
1926
|
+
stageId?: string;
|
|
1927
|
+
}
|
|
1928
|
+
interface StageData {
|
|
1929
|
+
accumulatorUpdates?: AccumulableInfo[];
|
|
1930
|
+
completionTime?: string;
|
|
1931
|
+
description?: string;
|
|
1932
|
+
details?: string;
|
|
1933
|
+
executorMetricsDistributions?: ExecutorMetricsDistributions;
|
|
1934
|
+
executorSummary?: {[P in string]: ExecutorStageSummary};
|
|
1935
|
+
failureReason?: string;
|
|
1936
|
+
firstTaskLaunchedTime?: string;
|
|
1937
|
+
isShufflePushEnabled?: boolean;
|
|
1938
|
+
jobIds?: string[];
|
|
1939
|
+
killedTasksSummary?: {[P in string]: number};
|
|
1940
|
+
locality?: {[P in string]: string};
|
|
1941
|
+
name?: string;
|
|
1942
|
+
numActiveTasks?: number;
|
|
1943
|
+
numCompletedIndices?: number;
|
|
1944
|
+
numCompleteTasks?: number;
|
|
1945
|
+
numFailedTasks?: number;
|
|
1946
|
+
numKilledTasks?: number;
|
|
1947
|
+
numTasks?: number;
|
|
1948
|
+
parentStageIds?: string[];
|
|
1949
|
+
peakExecutorMetrics?: ExecutorMetrics;
|
|
1950
|
+
rddIds?: string[];
|
|
1951
|
+
resourceProfileId?: number;
|
|
1952
|
+
schedulingPool?: string;
|
|
1953
|
+
shuffleMergersCount?: number;
|
|
1954
|
+
speculationSummary?: SpeculationStageSummary;
|
|
1955
|
+
stageAttemptId?: number;
|
|
1956
|
+
stageId?: string;
|
|
1957
|
+
stageMetrics?: StageMetrics;
|
|
1958
|
+
status?: string;
|
|
1959
|
+
submissionTime?: string;
|
|
1960
|
+
/** Summary metrics fields. These are included in response only if present in summary_metrics_mask field in request */
|
|
1961
|
+
taskQuantileMetrics?: TaskQuantileMetrics;
|
|
1962
|
+
tasks?: {[P in string]: TaskData};
|
|
1963
|
+
}
|
|
1964
|
+
interface StageInputMetrics {
|
|
1965
|
+
bytesRead?: string;
|
|
1966
|
+
recordsRead?: string;
|
|
1967
|
+
}
|
|
1968
|
+
interface StageMetrics {
|
|
1969
|
+
diskBytesSpilled?: string;
|
|
1970
|
+
executorCpuTimeNanos?: string;
|
|
1971
|
+
executorDeserializeCpuTimeNanos?: string;
|
|
1972
|
+
executorDeserializeTimeMillis?: string;
|
|
1973
|
+
executorRunTimeMillis?: string;
|
|
1974
|
+
jvmGcTimeMillis?: string;
|
|
1975
|
+
memoryBytesSpilled?: string;
|
|
1976
|
+
peakExecutionMemoryBytes?: string;
|
|
1977
|
+
resultSerializationTimeMillis?: string;
|
|
1978
|
+
resultSize?: string;
|
|
1979
|
+
stageInputMetrics?: StageInputMetrics;
|
|
1980
|
+
stageOutputMetrics?: StageOutputMetrics;
|
|
1981
|
+
stageShuffleReadMetrics?: StageShuffleReadMetrics;
|
|
1982
|
+
stageShuffleWriteMetrics?: StageShuffleWriteMetrics;
|
|
1983
|
+
}
|
|
1984
|
+
interface StageOutputMetrics {
|
|
1985
|
+
bytesWritten?: string;
|
|
1986
|
+
recordsWritten?: string;
|
|
1987
|
+
}
|
|
1988
|
+
interface StageShufflePushReadMetrics {
|
|
1989
|
+
corruptMergedBlockChunks?: string;
|
|
1990
|
+
localMergedBlocksFetched?: string;
|
|
1991
|
+
localMergedBytesRead?: string;
|
|
1992
|
+
localMergedChunksFetched?: string;
|
|
1993
|
+
mergedFetchFallbackCount?: string;
|
|
1994
|
+
remoteMergedBlocksFetched?: string;
|
|
1995
|
+
remoteMergedBytesRead?: string;
|
|
1996
|
+
remoteMergedChunksFetched?: string;
|
|
1997
|
+
remoteMergedReqsDuration?: string;
|
|
1998
|
+
}
|
|
1999
|
+
interface StageShuffleReadMetrics {
|
|
2000
|
+
bytesRead?: string;
|
|
2001
|
+
fetchWaitTimeMillis?: string;
|
|
2002
|
+
localBlocksFetched?: string;
|
|
2003
|
+
localBytesRead?: string;
|
|
2004
|
+
recordsRead?: string;
|
|
2005
|
+
remoteBlocksFetched?: string;
|
|
2006
|
+
remoteBytesRead?: string;
|
|
2007
|
+
remoteBytesReadToDisk?: string;
|
|
2008
|
+
remoteReqsDuration?: string;
|
|
2009
|
+
stageShufflePushReadMetrics?: StageShufflePushReadMetrics;
|
|
2010
|
+
}
|
|
2011
|
+
interface StageShuffleWriteMetrics {
|
|
2012
|
+
bytesWritten?: string;
|
|
2013
|
+
recordsWritten?: string;
|
|
2014
|
+
writeTimeNanos?: string;
|
|
2015
|
+
}
|
|
2016
|
+
interface StagesSummary {
|
|
2017
|
+
applicationId?: string;
|
|
2018
|
+
numActiveStages?: number;
|
|
2019
|
+
numCompletedStages?: number;
|
|
2020
|
+
numFailedStages?: number;
|
|
2021
|
+
numPendingStages?: number;
|
|
2022
|
+
numSkippedStages?: number;
|
|
2023
|
+
}
|
|
1311
2024
|
interface StartClusterRequest {
|
|
1312
2025
|
/** Optional. Specifying the cluster_uuid means the RPC will fail (with error NOT_FOUND) if a cluster with the specified UUID does not exist. */
|
|
1313
2026
|
clusterUuid?: string;
|
|
@@ -1326,6 +2039,20 @@ declare namespace gapi.client {
|
|
|
1326
2039
|
/** Output only. The time when the batch entered the historical state. */
|
|
1327
2040
|
stateStartTime?: string;
|
|
1328
2041
|
}
|
|
2042
|
+
interface StateOperatorProgress {
|
|
2043
|
+
allRemovalsTimeMs?: string;
|
|
2044
|
+
allUpdatesTimeMs?: string;
|
|
2045
|
+
commitTimeMs?: string;
|
|
2046
|
+
customMetrics?: {[P in string]: string};
|
|
2047
|
+
memoryUsedBytes?: string;
|
|
2048
|
+
numRowsDroppedByWatermark?: string;
|
|
2049
|
+
numRowsRemoved?: string;
|
|
2050
|
+
numRowsTotal?: string;
|
|
2051
|
+
numRowsUpdated?: string;
|
|
2052
|
+
numShufflePartitions?: string;
|
|
2053
|
+
numStateStoreInstances?: string;
|
|
2054
|
+
operatorName?: string;
|
|
2055
|
+
}
|
|
1329
2056
|
interface Status {
|
|
1330
2057
|
/** The status code, which should be an enum value of google.rpc.Code. */
|
|
1331
2058
|
code?: number;
|
|
@@ -1340,12 +2067,152 @@ declare namespace gapi.client {
|
|
|
1340
2067
|
/** Optional. A unique ID used to identify the request. If the server receives two StopClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StopClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters. */
|
|
1341
2068
|
requestId?: string;
|
|
1342
2069
|
}
|
|
2070
|
+
interface StreamBlockData {
|
|
2071
|
+
deserialized?: boolean;
|
|
2072
|
+
diskSize?: string;
|
|
2073
|
+
executorId?: string;
|
|
2074
|
+
hostPort?: string;
|
|
2075
|
+
memSize?: string;
|
|
2076
|
+
name?: string;
|
|
2077
|
+
storageLevel?: string;
|
|
2078
|
+
useDisk?: boolean;
|
|
2079
|
+
useMemory?: boolean;
|
|
2080
|
+
}
|
|
2081
|
+
interface StreamingQueryData {
|
|
2082
|
+
endTimestamp?: string;
|
|
2083
|
+
exception?: string;
|
|
2084
|
+
isActive?: boolean;
|
|
2085
|
+
name?: string;
|
|
2086
|
+
runId?: string;
|
|
2087
|
+
startTimestamp?: string;
|
|
2088
|
+
streamingQueryId?: string;
|
|
2089
|
+
}
|
|
2090
|
+
interface StreamingQueryProgress {
|
|
2091
|
+
batchDuration?: string;
|
|
2092
|
+
batchId?: string;
|
|
2093
|
+
durationMillis?: {[P in string]: string};
|
|
2094
|
+
eventTime?: {[P in string]: string};
|
|
2095
|
+
name?: string;
|
|
2096
|
+
observedMetrics?: {[P in string]: string};
|
|
2097
|
+
runId?: string;
|
|
2098
|
+
sink?: SinkProgress;
|
|
2099
|
+
sources?: SourceProgress[];
|
|
2100
|
+
stateOperators?: StateOperatorProgress[];
|
|
2101
|
+
streamingQueryProgressId?: string;
|
|
2102
|
+
timestamp?: string;
|
|
2103
|
+
}
|
|
1343
2104
|
interface SubmitJobRequest {
|
|
1344
2105
|
/** Required. The job resource. */
|
|
1345
2106
|
job?: Job;
|
|
1346
2107
|
/** Optional. A unique id used to identify the request. If the server receives two SubmitJobRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.SubmitJobRequest)s with the same id, then the second request will be ignored and the first Job created and stored in the backend is returned.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters. */
|
|
1347
2108
|
requestId?: string;
|
|
1348
2109
|
}
|
|
2110
|
+
interface SummarizeSessionSparkApplicationExecutorsResponse {
|
|
2111
|
+
/** Consolidated summary for active executors. */
|
|
2112
|
+
activeExecutorSummary?: ConsolidatedExecutorSummary;
|
|
2113
|
+
/** Spark Application Id */
|
|
2114
|
+
applicationId?: string;
|
|
2115
|
+
/** Consolidated summary for dead executors. */
|
|
2116
|
+
deadExecutorSummary?: ConsolidatedExecutorSummary;
|
|
2117
|
+
/** Overall consolidated summary for all executors. */
|
|
2118
|
+
totalExecutorSummary?: ConsolidatedExecutorSummary;
|
|
2119
|
+
}
|
|
2120
|
+
interface SummarizeSessionSparkApplicationJobsResponse {
|
|
2121
|
+
/** Summary of a Spark Application Jobs */
|
|
2122
|
+
jobsSummary?: JobsSummary;
|
|
2123
|
+
}
|
|
2124
|
+
interface SummarizeSessionSparkApplicationStageAttemptTasksResponse {
|
|
2125
|
+
/** Summary of tasks for a Spark Application Stage Attempt */
|
|
2126
|
+
stageAttemptTasksSummary?: StageAttemptTasksSummary;
|
|
2127
|
+
}
|
|
2128
|
+
interface SummarizeSessionSparkApplicationStagesResponse {
|
|
2129
|
+
/** Summary of a Spark Application Stages */
|
|
2130
|
+
stagesSummary?: StagesSummary;
|
|
2131
|
+
}
|
|
2132
|
+
interface SummarizeSparkApplicationExecutorsResponse {
|
|
2133
|
+
/** Consolidated summary for active executors. */
|
|
2134
|
+
activeExecutorSummary?: ConsolidatedExecutorSummary;
|
|
2135
|
+
/** Spark Application Id */
|
|
2136
|
+
applicationId?: string;
|
|
2137
|
+
+   /** Consolidated summary for dead executors. */
+   deadExecutorSummary?: ConsolidatedExecutorSummary;
+   /** Overall consolidated summary for all executors. */
+   totalExecutorSummary?: ConsolidatedExecutorSummary;
+ }
+ interface SummarizeSparkApplicationJobsResponse {
+   /** Summary of a Spark Application Jobs */
+   jobsSummary?: JobsSummary;
+ }
+ interface SummarizeSparkApplicationStageAttemptTasksResponse {
+   /** Summary of tasks for a Spark Application Stage Attempt */
+   stageAttemptTasksSummary?: StageAttemptTasksSummary;
+ }
+ interface SummarizeSparkApplicationStagesResponse {
+   /** Summary of a Spark Application Stages */
+   stagesSummary?: StagesSummary;
+ }
+ interface TaskData {
+   accumulatorUpdates?: AccumulableInfo[];
+   attempt?: number;
+   durationMillis?: string;
+   errorMessage?: string;
+   executorId?: string;
+   executorLogs?: {[P in string]: string};
+   gettingResultTimeMillis?: string;
+   hasMetrics?: boolean;
+   host?: string;
+   index?: number;
+   launchTime?: string;
+   partitionId?: number;
+   resultFetchStart?: string;
+   schedulerDelayMillis?: string;
+   speculative?: boolean;
+   stageAttemptId?: number;
+   stageId?: string;
+   status?: string;
+   taskId?: string;
+   taskLocality?: string;
+   taskMetrics?: TaskMetrics;
+ }
+ interface TaskMetrics {
+   diskBytesSpilled?: string;
+   executorCpuTimeNanos?: string;
+   executorDeserializeCpuTimeNanos?: string;
+   executorDeserializeTimeMillis?: string;
+   executorRunTimeMillis?: string;
+   inputMetrics?: InputMetrics;
+   jvmGcTimeMillis?: string;
+   memoryBytesSpilled?: string;
+   outputMetrics?: OutputMetrics;
+   peakExecutionMemoryBytes?: string;
+   resultSerializationTimeMillis?: string;
+   resultSize?: string;
+   shuffleReadMetrics?: ShuffleReadMetrics;
+   shuffleWriteMetrics?: ShuffleWriteMetrics;
+ }
+ interface TaskQuantileMetrics {
+   diskBytesSpilled?: Quantiles;
+   durationMillis?: Quantiles;
+   executorCpuTimeNanos?: Quantiles;
+   executorDeserializeCpuTimeNanos?: Quantiles;
+   executorDeserializeTimeMillis?: Quantiles;
+   executorRunTimeMillis?: Quantiles;
+   gettingResultTimeMillis?: Quantiles;
+   inputMetrics?: InputQuantileMetrics;
+   jvmGcTimeMillis?: Quantiles;
+   memoryBytesSpilled?: Quantiles;
+   outputMetrics?: OutputQuantileMetrics;
+   peakExecutionMemoryBytes?: Quantiles;
+   resultSerializationTimeMillis?: Quantiles;
+   resultSize?: Quantiles;
+   schedulerDelayMillis?: Quantiles;
+   shuffleReadMetrics?: ShuffleReadQuantileMetrics;
+   shuffleWriteMetrics?: ShuffleWriteQuantileMetrics;
+ }
+ interface TaskResourceRequest {
+   amount?: number;
+   resourceName?: string;
+ }
  interface TemplateParameter {
    /** Optional. Brief description of the parameter. Must not exceed 1024 characters. */
    description?: string;
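
The new Summarize* and Task* shapes above expose per-task Spark monitoring data: TaskData carries the per-task facts, TaskMetrics the raw counters, and TaskQuantileMetrics the same counters as Quantiles distributions. A minimal consumer sketch, assuming the gapi.client.dataproc namespace these generated typings normally declare; the helper name is hypothetical and the string-typed counters (int64 values in the REST API) are parsed before summing:

    // Hypothetical helper, sketched against the TaskData/TaskMetrics declarations above.
    function totalSpilledBytes(tasks: gapi.client.dataproc.TaskData[]): number {
      return tasks.reduce((sum, task) => {
        const metrics = task.taskMetrics;
        // diskBytesSpilled and memoryBytesSpilled are int64 counters typed as strings.
        const disk = Number(metrics?.diskBytesSpilled ?? '0');
        const memory = Number(metrics?.memoryBytesSpilled ?? '0');
        return sum + disk + memory;
      }, 0);
    }
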
@@ -1497,6 +2364,19 @@ declare namespace gapi.client {
    /** A cluster that is managed by the workflow. */
    managedCluster?: ManagedCluster;
  }
+ interface WriteSessionSparkApplicationContextRequest {
+   /** Required. Parent (Batch) resource reference. */
+   parent?: string;
+   /** Required. The batch of spark application context objects sent for ingestion. */
+   sparkWrapperObjects?: SparkWrapperObject[];
+ }
+ interface WriteSessionSparkApplicationContextResponse {}
+ interface WriteSparkApplicationContextRequest {
+   /** Required. Parent (Batch) resource reference. */
+   parent?: string;
+   sparkWrapperObjects?: SparkWrapperObject[];
+ }
+ interface WriteSparkApplicationContextResponse {}
  interface YarnApplication {
    /** Required. The application name. */
    name?: string;
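
The Write* request/response pairs above are the payload types for the write methods added further down in this diff; per their doc comments they exist so the Dataproc dataplane can ingest Spark application context objects, not for typical end-user calls. A minimal sketch of building such a body, assuming the gapi.client.dataproc namespace; the parent path is a placeholder and SparkWrapperObject is declared elsewhere in this file:

    // Sketch only: placeholder values, not a working ingestion pipeline.
    const wrapperObjects: gapi.client.dataproc.SparkWrapperObject[] = [];

    const writeRequest: gapi.client.dataproc.WriteSparkApplicationContextRequest = {
      // Hypothetical batch resource name, used purely for illustration.
      parent: 'projects/my-project/locations/us-central1/batches/my-batch',
      sparkWrapperObjects: wrapperObjects,
    };
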
@@ -1800,9 +2680,9 @@ declare namespace gapi.client {
      body: AutoscalingPolicy
    ): Request<AutoscalingPolicy>;
  }
- interface
-   /**
-
+ interface SparkApplicationsResource {
+   /** Obtain high level information corresponding to a single Spark Application. */
+   access(request?: {
      /** V1 error format. */
      '$.xgafv'?: string;
      /** OAuth access token. */
@@ -1815,10 +2695,12 @@ declare namespace gapi.client {
      fields?: string;
      /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
      key?: string;
-     /** Required. The fully qualified name of the batch to
+     /** Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" */
      name: string;
      /** OAuth 2.0 token for the current user. */
      oauth_token?: string;
+     /** Required. Parent (Batch) resource reference. */
+     parent?: string;
      /** Returns response with indentations and line breaks. */
      prettyPrint?: boolean;
      /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
@@ -1827,106 +2709,38 @@ declare namespace gapi.client {
      upload_protocol?: string;
      /** Legacy upload protocol for media (e.g. "media", "multipart"). */
      uploadType?: string;
-
-
-
-   analyze(
-     request: {
-       /** V1 error format. */
-       '$.xgafv'?: string;
-       /** OAuth access token. */
-       access_token?: string;
-       /** Data format for response. */
-       alt?: string;
-       /** JSONP */
-       callback?: string;
-       /** Selector specifying which fields to include in a partial response. */
-       fields?: string;
-       /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
-       key?: string;
-       /** Required. The fully qualified name of the batch to analyze in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID" */
-       name: string;
-       /** OAuth 2.0 token for the current user. */
-       oauth_token?: string;
-       /** Returns response with indentations and line breaks. */
-       prettyPrint?: boolean;
-       /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
-       quotaUser?: string;
-       /** Upload protocol for media (e.g. "raw", "multipart"). */
-       upload_protocol?: string;
-       /** Legacy upload protocol for media (e.g. "media", "multipart"). */
-       uploadType?: string;
-     },
-     body: AnalyzeBatchRequest
-   ): Request<Operation>;
-   /** Creates a batch workload that executes asynchronously. */
-   create(request: {
+   }): Request<AccessSparkApplicationResponse>;
+   /** Obtain environment details for a Spark Application */
+   accessEnvironmentInfo(request?: {
      /** V1 error format. */
      '$.xgafv'?: string;
      /** OAuth access token. */
      access_token?: string;
      /** Data format for response. */
      alt?: string;
-     /** Optional. The ID to use for the batch, which will become the final component of the batch's resource name.This value must be 4-63 characters. Valid characters are /[a-z][0-9]-/. */
-     batchId?: string;
      /** JSONP */
      callback?: string;
      /** Selector specifying which fields to include in a partial response. */
      fields?: string;
      /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
      key?: string;
+     /** Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" */
+     name: string;
      /** OAuth 2.0 token for the current user. */
      oauth_token?: string;
-     /** Required.
-     parent
+     /** Required. Parent (Batch) resource reference. */
+     parent?: string;
      /** Returns response with indentations and line breaks. */
      prettyPrint?: boolean;
      /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
      quotaUser?: string;
-     /** Optional. A unique ID used to identify the request. If the service receives two CreateBatchRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateBatchRequest)s with the same request_id, the second request is ignored and the Operation that corresponds to the first Batch created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The value must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters. */
-     requestId?: string;
      /** Upload protocol for media (e.g. "raw", "multipart"). */
      upload_protocol?: string;
      /** Legacy upload protocol for media (e.g. "media", "multipart"). */
      uploadType?: string;
-
-
-
-   create(
-     request: {
-       /** V1 error format. */
-       '$.xgafv'?: string;
-       /** OAuth access token. */
-       access_token?: string;
-       /** Data format for response. */
-       alt?: string;
-       /** Optional. The ID to use for the batch, which will become the final component of the batch's resource name.This value must be 4-63 characters. Valid characters are /[a-z][0-9]-/. */
-       batchId?: string;
-       /** JSONP */
-       callback?: string;
-       /** Selector specifying which fields to include in a partial response. */
-       fields?: string;
-       /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
-       key?: string;
-       /** OAuth 2.0 token for the current user. */
-       oauth_token?: string;
-       /** Required. The parent resource where this batch will be created. */
-       parent: string;
-       /** Returns response with indentations and line breaks. */
-       prettyPrint?: boolean;
-       /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
-       quotaUser?: string;
-       /** Optional. A unique ID used to identify the request. If the service receives two CreateBatchRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateBatchRequest)s with the same request_id, the second request is ignored and the Operation that corresponds to the first Batch created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The value must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters. */
-       requestId?: string;
-       /** Upload protocol for media (e.g. "raw", "multipart"). */
-       upload_protocol?: string;
-       /** Legacy upload protocol for media (e.g. "media", "multipart"). */
-       uploadType?: string;
-     },
-     body: Batch
-   ): Request<Operation>;
-   /** Deletes the batch workload resource. If the batch is not in a CANCELLED, SUCCEEDED or FAILED State, the delete operation fails and the response returns FAILED_PRECONDITION. */
-   delete(request?: {
+   }): Request<AccessSparkApplicationEnvironmentInfoResponse>;
+   /** Obtain data corresponding to a spark job for a Spark Application. */
+   accessJob(request?: {
      /** V1 error format. */
      '$.xgafv'?: string;
      /** OAuth access token. */
@@ -1937,12 +2751,16 @@ declare namespace gapi.client {
      callback?: string;
      /** Selector specifying which fields to include in a partial response. */
      fields?: string;
+     /** Required. Job ID to fetch data for. */
+     jobId?: string;
      /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
      key?: string;
-     /** Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID" */
+     /** Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" */
      name: string;
      /** OAuth 2.0 token for the current user. */
      oauth_token?: string;
+     /** Required. Parent (Batch) resource reference. */
+     parent?: string;
      /** Returns response with indentations and line breaks. */
      prettyPrint?: boolean;
      /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
@@ -1951,9 +2769,9 @@ declare namespace gapi.client {
      upload_protocol?: string;
      /** Legacy upload protocol for media (e.g. "media", "multipart"). */
      uploadType?: string;
-   }): Request<
-   /**
-
+   }): Request<AccessSparkApplicationJobResponse>;
+   /** Obtain Spark Plan Graph for a Spark Application SQL execution. Limits the number of clusters returned as part of the graph to 10000. */
+   accessSqlPlan(request?: {
      /** V1 error format. */
      '$.xgafv'?: string;
      /** OAuth access token. */
@@ -1962,14 +2780,18 @@ declare namespace gapi.client {
      alt?: string;
      /** JSONP */
      callback?: string;
+     /** Required. Execution ID */
+     executionId?: string;
      /** Selector specifying which fields to include in a partial response. */
      fields?: string;
      /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
      key?: string;
-     /** Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID" */
+     /** Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" */
      name: string;
      /** OAuth 2.0 token for the current user. */
      oauth_token?: string;
+     /** Required. Parent (Batch) resource reference. */
+     parent?: string;
      /** Returns response with indentations and line breaks. */
      prettyPrint?: boolean;
      /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
@@ -1978,9 +2800,9 @@ declare namespace gapi.client {
      upload_protocol?: string;
      /** Legacy upload protocol for media (e.g. "media", "multipart"). */
      uploadType?: string;
-   }): Request<
-   /**
-
+   }): Request<AccessSparkApplicationSqlSparkPlanGraphResponse>;
+   /** Obtain data corresponding to a particular SQL Query for a Spark Application. */
+   accessSqlQuery(request?: {
      /** V1 error format. */
      '$.xgafv'?: string;
      /** OAuth access token. */
@@ -1989,22 +2811,22 @@ declare namespace gapi.client {
      alt?: string;
      /** JSONP */
      callback?: string;
+     /** Optional. Lists/ hides details of Spark plan nodes. True is set to list and false to hide. */
+     details?: boolean;
+     /** Required. Execution ID */
+     executionId?: string;
      /** Selector specifying which fields to include in a partial response. */
      fields?: string;
-     /** Optional. A filter for the batches to return in the response.A filter is a logical expression constraining the values of various fields in each batch resource. Filters are case sensitive, and may contain multiple clauses combined with logical operators (AND/OR). Supported fields are batch_id, batch_uuid, state, create_time, and labels.e.g. state = RUNNING and create_time < "2023-01-01T00:00:00Z" filters for batches in state RUNNING that were created before 2023-01-01. state = RUNNING and labels.environment=production filters for batches in state in a RUNNING state that have a production environment label.See https://google.aip.dev/assets/misc/ebnf-filtering.txt for a detailed description of the filter syntax and a list of supported comparisons. */
-     filter?: string;
      /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
      key?: string;
+     /** Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" */
+     name: string;
      /** OAuth 2.0 token for the current user. */
      oauth_token?: string;
-     /**
-
-     /** Optional.
-
-     /** Optional. A page token received from a previous ListBatches call. Provide this token to retrieve the subsequent page. */
-     pageToken?: string;
-     /** Required. The parent, which owns this collection of batches. */
-     parent: string;
+     /** Required. Parent (Batch) resource reference. */
+     parent?: string;
+     /** Optional. Enables/ disables physical plan description on demand */
+     planDescription?: boolean;
      /** Returns response with indentations and line breaks. */
      prettyPrint?: boolean;
      /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
@@ -2013,11 +2835,9 @@ declare namespace gapi.client {
      upload_protocol?: string;
      /** Legacy upload protocol for media (e.g. "media", "multipart"). */
      uploadType?: string;
-   }): Request<
-
-
-   /** Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to Code.CANCELLED. */
-   cancel(request?: {
+   }): Request<AccessSparkApplicationSqlQueryResponse>;
+   /** Obtain data corresponding to a spark stage attempt for a Spark Application. */
+   accessStageAttempt(request?: {
      /** V1 error format. */
      '$.xgafv'?: string;
      /** OAuth access token. */
@@ -2030,21 +2850,29 @@ declare namespace gapi.client {
      fields?: string;
      /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
      key?: string;
-     /** The name of the
+     /** Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" */
      name: string;
      /** OAuth 2.0 token for the current user. */
      oauth_token?: string;
+     /** Required. Parent (Batch) resource reference. */
+     parent?: string;
      /** Returns response with indentations and line breaks. */
      prettyPrint?: boolean;
      /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
      quotaUser?: string;
+     /** Required. Stage Attempt ID */
+     stageAttemptId?: number;
+     /** Required. Stage ID */
+     stageId?: string;
+     /** Optional. The list of summary metrics fields to include. Empty list will default to skip all summary metrics fields. Example, if the response should include TaskQuantileMetrics, the request should have task_quantile_metrics in summary_metrics_mask field */
+     summaryMetricsMask?: string;
      /** Upload protocol for media (e.g. "raw", "multipart"). */
      upload_protocol?: string;
      /** Legacy upload protocol for media (e.g. "media", "multipart"). */
      uploadType?: string;
-   }): Request<
-   /**
-
+   }): Request<AccessSparkApplicationStageAttemptResponse>;
+   /** Obtain RDD operation graph for a Spark Application Stage. Limits the number of clusters returned as part of the graph to 10000. */
+   accessStageRddGraph(request?: {
      /** V1 error format. */
      '$.xgafv'?: string;
      /** OAuth access token. */
@@ -2057,37 +2885,55 @@ declare namespace gapi.client {
      fields?: string;
      /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
      key?: string;
-     /** The name of the
+     /** Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" */
      name: string;
      /** OAuth 2.0 token for the current user. */
      oauth_token?: string;
+     /** Required. Parent (Batch) resource reference. */
+     parent?: string;
      /** Returns response with indentations and line breaks. */
      prettyPrint?: boolean;
      /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
      quotaUser?: string;
+     /** Required. Stage ID */
+     stageId?: string;
      /** Upload protocol for media (e.g. "raw", "multipart"). */
      upload_protocol?: string;
      /** Legacy upload protocol for media (e.g. "media", "multipart"). */
      uploadType?: string;
-   }): Request<
-   /**
-
+   }): Request<AccessSparkApplicationStageRddOperationGraphResponse>;
+   /** Obtain high level information and list of Spark Applications corresponding to a batch */
+   search(request?: {
      /** V1 error format. */
      '$.xgafv'?: string;
      /** OAuth access token. */
      access_token?: string;
      /** Data format for response. */
      alt?: string;
+     /** Optional. Search only applications in the chosen state. */
+     applicationStatus?: string;
      /** JSONP */
      callback?: string;
      /** Selector specifying which fields to include in a partial response. */
      fields?: string;
      /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
      key?: string;
-     /**
-
+     /** Optional. Latest end timestamp to list. */
+     maxEndTime?: string;
+     /** Optional. Latest start timestamp to list. */
+     maxTime?: string;
+     /** Optional. Earliest end timestamp to list. */
+     minEndTime?: string;
+     /** Optional. Earliest start timestamp to list. */
+     minTime?: string;
      /** OAuth 2.0 token for the current user. */
      oauth_token?: string;
+     /** Optional. Maximum number of applications to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100. */
+     pageSize?: number;
+     /** Optional. A page token received from a previous SearchSparkApplications call. Provide this token to retrieve the subsequent page. */
+     pageToken?: string;
+     /** Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID" */
+     parent: string;
      /** Returns response with indentations and line breaks. */
      prettyPrint?: boolean;
      /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
@@ -2096,9 +2942,9 @@ declare namespace gapi.client {
      upload_protocol?: string;
      /** Legacy upload protocol for media (e.g. "media", "multipart"). */
      uploadType?: string;
-   }): Request<
-   /**
-
+   }): Request<SearchSparkApplicationsResponse>;
+   /** Obtain data corresponding to executors for a Spark Application. */
+   searchExecutors(request?: {
      /** V1 error format. */
      '$.xgafv'?: string;
      /** OAuth access token. */
@@ -2107,20 +2953,22 @@ declare namespace gapi.client {
      alt?: string;
      /** JSONP */
      callback?: string;
+     /** Optional. Filter to select whether active/ dead or all executors should be selected. */
+     executorStatus?: string;
      /** Selector specifying which fields to include in a partial response. */
      fields?: string;
-     /** The standard list filter. */
-     filter?: string;
      /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
      key?: string;
-     /** The name of the
+     /** Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" */
      name: string;
      /** OAuth 2.0 token for the current user. */
      oauth_token?: string;
-     /** The
+     /** Optional. Maximum number of executors to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100. */
      pageSize?: number;
-     /**
+     /** Optional. A page token received from a previous AccessSparkApplicationExecutorsList call. Provide this token to retrieve the subsequent page. */
      pageToken?: string;
+     /** Required. Parent (Batch) resource reference. */
+     parent?: string;
      /** Returns response with indentations and line breaks. */
      prettyPrint?: boolean;
      /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
@@ -2129,7 +2977,1442 @@ declare namespace gapi.client {
      upload_protocol?: string;
      /** Legacy upload protocol for media (e.g. "media", "multipart"). */
      uploadType?: string;
-   }): Request<
+   }): Request<SearchSparkApplicationExecutorsResponse>;
+   /** Obtain executor summary with respect to a spark stage attempt. */
+   searchExecutorStageSummary(request?: {
+     /** V1 error format. */
+     '$.xgafv'?: string;
+     /** OAuth access token. */
+     access_token?: string;
+     /** Data format for response. */
+     alt?: string;
+     /** JSONP */
+     callback?: string;
+     /** Selector specifying which fields to include in a partial response. */
+     fields?: string;
+     /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+     key?: string;
+     /** Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" */
+     name: string;
+     /** OAuth 2.0 token for the current user. */
+     oauth_token?: string;
+     /** Optional. Maximum number of executors to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100. */
+     pageSize?: number;
+     /** Optional. A page token received from a previous AccessSparkApplicationExecutorsList call. Provide this token to retrieve the subsequent page. */
+     pageToken?: string;
+     /** Required. Parent (Batch) resource reference. */
+     parent?: string;
+     /** Returns response with indentations and line breaks. */
+     prettyPrint?: boolean;
+     /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+     quotaUser?: string;
+     /** Required. Stage Attempt ID */
+     stageAttemptId?: number;
+     /** Required. Stage ID */
+     stageId?: string;
+     /** Upload protocol for media (e.g. "raw", "multipart"). */
+     upload_protocol?: string;
+     /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+     uploadType?: string;
+   }): Request<SearchSparkApplicationExecutorStageSummaryResponse>;
+   /** Obtain list of spark jobs corresponding to a Spark Application. */
+   searchJobs(request?: {
+     /** V1 error format. */
+     '$.xgafv'?: string;
+     /** OAuth access token. */
+     access_token?: string;
+     /** Data format for response. */
+     alt?: string;
+     /** JSONP */
+     callback?: string;
+     /** Selector specifying which fields to include in a partial response. */
+     fields?: string;
+     /** Optional. List only jobs in the specific state. */
+     jobStatus?: string;
+     /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+     key?: string;
+     /** Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" */
+     name: string;
+     /** OAuth 2.0 token for the current user. */
+     oauth_token?: string;
+     /** Optional. Maximum number of jobs to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100. */
+     pageSize?: number;
+     /** Optional. A page token received from a previous SearchSparkApplicationJobs call. Provide this token to retrieve the subsequent page. */
+     pageToken?: string;
+     /** Required. Parent (Batch) resource reference. */
+     parent?: string;
+     /** Returns response with indentations and line breaks. */
+     prettyPrint?: boolean;
+     /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+     quotaUser?: string;
+     /** Upload protocol for media (e.g. "raw", "multipart"). */
+     upload_protocol?: string;
+     /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+     uploadType?: string;
+   }): Request<SearchSparkApplicationJobsResponse>;
+   /** Obtain data corresponding to SQL Queries for a Spark Application. */
+   searchSqlQueries(request?: {
+     /** V1 error format. */
+     '$.xgafv'?: string;
+     /** OAuth access token. */
+     access_token?: string;
+     /** Data format for response. */
+     alt?: string;
+     /** JSONP */
+     callback?: string;
+     /** Optional. Lists/ hides details of Spark plan nodes. True is set to list and false to hide. */
+     details?: boolean;
+     /** Selector specifying which fields to include in a partial response. */
+     fields?: string;
+     /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+     key?: string;
+     /** Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" */
+     name: string;
+     /** OAuth 2.0 token for the current user. */
+     oauth_token?: string;
+     /** Optional. Maximum number of queries to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100. */
+     pageSize?: number;
+     /** Optional. A page token received from a previous SearchSparkApplicationSqlQueries call. Provide this token to retrieve the subsequent page. */
+     pageToken?: string;
+     /** Required. Parent (Batch) resource reference. */
+     parent?: string;
+     /** Optional. Enables/ disables physical plan description on demand */
+     planDescription?: boolean;
+     /** Returns response with indentations and line breaks. */
+     prettyPrint?: boolean;
+     /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+     quotaUser?: string;
+     /** Upload protocol for media (e.g. "raw", "multipart"). */
+     upload_protocol?: string;
+     /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+     uploadType?: string;
+   }): Request<SearchSparkApplicationSqlQueriesResponse>;
+   /** Obtain data corresponding to a spark stage attempts for a Spark Application. */
+   searchStageAttempts(request?: {
+     /** V1 error format. */
+     '$.xgafv'?: string;
+     /** OAuth access token. */
+     access_token?: string;
+     /** Data format for response. */
+     alt?: string;
+     /** JSONP */
+     callback?: string;
+     /** Selector specifying which fields to include in a partial response. */
+     fields?: string;
+     /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+     key?: string;
+     /** Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" */
+     name: string;
+     /** OAuth 2.0 token for the current user. */
+     oauth_token?: string;
+     /** Optional. Maximum number of stage attempts (paging based on stage_attempt_id) to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100. */
+     pageSize?: number;
+     /** Optional. A page token received from a previous SearchSparkApplicationStageAttempts call. Provide this token to retrieve the subsequent page. */
+     pageToken?: string;
+     /** Required. Parent (Batch) resource reference. */
+     parent?: string;
+     /** Returns response with indentations and line breaks. */
+     prettyPrint?: boolean;
+     /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+     quotaUser?: string;
+     /** Required. Stage ID for which attempts are to be fetched */
+     stageId?: string;
+     /** Optional. The list of summary metrics fields to include. Empty list will default to skip all summary metrics fields. Example, if the response should include TaskQuantileMetrics, the request should have task_quantile_metrics in summary_metrics_mask field */
+     summaryMetricsMask?: string;
+     /** Upload protocol for media (e.g. "raw", "multipart"). */
+     upload_protocol?: string;
+     /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+     uploadType?: string;
+   }): Request<SearchSparkApplicationStageAttemptsResponse>;
+   /** Obtain data corresponding to tasks for a spark stage attempt for a Spark Application. */
+   searchStageAttemptTasks(request?: {
+     /** V1 error format. */
+     '$.xgafv'?: string;
+     /** OAuth access token. */
+     access_token?: string;
+     /** Data format for response. */
+     alt?: string;
+     /** JSONP */
+     callback?: string;
+     /** Selector specifying which fields to include in a partial response. */
+     fields?: string;
+     /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+     key?: string;
+     /** Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" */
+     name: string;
+     /** OAuth 2.0 token for the current user. */
+     oauth_token?: string;
+     /** Optional. Maximum number of tasks to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100. */
+     pageSize?: number;
+     /** Optional. A page token received from a previous ListSparkApplicationStageAttemptTasks call. Provide this token to retrieve the subsequent page. */
+     pageToken?: string;
+     /** Required. Parent (Batch) resource reference. */
+     parent?: string;
+     /** Returns response with indentations and line breaks. */
+     prettyPrint?: boolean;
+     /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+     quotaUser?: string;
+     /** Optional. Sort the tasks by runtime. */
+     sortRuntime?: boolean;
+     /** Optional. Stage Attempt ID */
+     stageAttemptId?: number;
+     /** Optional. Stage ID */
+     stageId?: string;
+     /** Optional. List only tasks in the state. */
+     taskStatus?: string;
+     /** Upload protocol for media (e.g. "raw", "multipart"). */
+     upload_protocol?: string;
+     /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+     uploadType?: string;
+   }): Request<SearchSparkApplicationStageAttemptTasksResponse>;
+   /** Obtain data corresponding to stages for a Spark Application. */
+   searchStages(request?: {
+     /** V1 error format. */
+     '$.xgafv'?: string;
+     /** OAuth access token. */
+     access_token?: string;
+     /** Data format for response. */
+     alt?: string;
+     /** JSONP */
+     callback?: string;
+     /** Selector specifying which fields to include in a partial response. */
+     fields?: string;
+     /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+     key?: string;
+     /** Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" */
+     name: string;
+     /** OAuth 2.0 token for the current user. */
+     oauth_token?: string;
+     /** Optional. Maximum number of stages (paging based on stage_id) to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100. */
+     pageSize?: number;
+     /** Optional. A page token received from a previous FetchSparkApplicationStagesList call. Provide this token to retrieve the subsequent page. */
+     pageToken?: string;
+     /** Required. Parent (Batch) resource reference. */
+     parent?: string;
+     /** Returns response with indentations and line breaks. */
+     prettyPrint?: boolean;
+     /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+     quotaUser?: string;
+     /** Optional. List only stages in the given state. */
+     stageStatus?: string;
+     /** Optional. The list of summary metrics fields to include. Empty list will default to skip all summary metrics fields. Example, if the response should include TaskQuantileMetrics, the request should have task_quantile_metrics in summary_metrics_mask field */
+     summaryMetricsMask?: string;
+     /** Upload protocol for media (e.g. "raw", "multipart"). */
+     upload_protocol?: string;
+     /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+     uploadType?: string;
+   }): Request<SearchSparkApplicationStagesResponse>;
+   /** Obtain summary of Executor Summary for a Spark Application */
+   summarizeExecutors(request?: {
+     /** V1 error format. */
+     '$.xgafv'?: string;
+     /** OAuth access token. */
+     access_token?: string;
+     /** Data format for response. */
+     alt?: string;
+     /** JSONP */
+     callback?: string;
+     /** Selector specifying which fields to include in a partial response. */
+     fields?: string;
+     /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+     key?: string;
+     /** Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" */
+     name: string;
+     /** OAuth 2.0 token for the current user. */
+     oauth_token?: string;
+     /** Required. Parent (Batch) resource reference. */
+     parent?: string;
+     /** Returns response with indentations and line breaks. */
+     prettyPrint?: boolean;
+     /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+     quotaUser?: string;
+     /** Upload protocol for media (e.g. "raw", "multipart"). */
+     upload_protocol?: string;
+     /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+     uploadType?: string;
+   }): Request<SummarizeSparkApplicationExecutorsResponse>;
+   /** Obtain summary of Jobs for a Spark Application */
+   summarizeJobs(request?: {
+     /** V1 error format. */
+     '$.xgafv'?: string;
+     /** OAuth access token. */
+     access_token?: string;
+     /** Data format for response. */
+     alt?: string;
+     /** JSONP */
+     callback?: string;
+     /** Selector specifying which fields to include in a partial response. */
+     fields?: string;
+     /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+     key?: string;
+     /** Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" */
+     name: string;
+     /** OAuth 2.0 token for the current user. */
+     oauth_token?: string;
+     /** Required. Parent (Batch) resource reference. */
+     parent?: string;
+     /** Returns response with indentations and line breaks. */
+     prettyPrint?: boolean;
+     /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+     quotaUser?: string;
+     /** Upload protocol for media (e.g. "raw", "multipart"). */
+     upload_protocol?: string;
+     /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+     uploadType?: string;
+   }): Request<SummarizeSparkApplicationJobsResponse>;
+   /** Obtain summary of Tasks for a Spark Application Stage Attempt */
+   summarizeStageAttemptTasks(request?: {
+     /** V1 error format. */
+     '$.xgafv'?: string;
+     /** OAuth access token. */
+     access_token?: string;
+     /** Data format for response. */
+     alt?: string;
+     /** JSONP */
+     callback?: string;
+     /** Selector specifying which fields to include in a partial response. */
+     fields?: string;
+     /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+     key?: string;
+     /** Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" */
+     name: string;
+     /** OAuth 2.0 token for the current user. */
+     oauth_token?: string;
+     /** Required. Parent (Batch) resource reference. */
+     parent?: string;
+     /** Returns response with indentations and line breaks. */
+     prettyPrint?: boolean;
+     /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+     quotaUser?: string;
+     /** Required. Stage Attempt ID */
+     stageAttemptId?: number;
+     /** Required. Stage ID */
+     stageId?: string;
+     /** Upload protocol for media (e.g. "raw", "multipart"). */
+     upload_protocol?: string;
+     /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+     uploadType?: string;
+   }): Request<SummarizeSparkApplicationStageAttemptTasksResponse>;
+   /** Obtain summary of Stages for a Spark Application */
+   summarizeStages(request?: {
+     /** V1 error format. */
+     '$.xgafv'?: string;
+     /** OAuth access token. */
+     access_token?: string;
+     /** Data format for response. */
+     alt?: string;
+     /** JSONP */
+     callback?: string;
+     /** Selector specifying which fields to include in a partial response. */
+     fields?: string;
+     /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+     key?: string;
+     /** Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" */
+     name: string;
+     /** OAuth 2.0 token for the current user. */
+     oauth_token?: string;
+     /** Required. Parent (Batch) resource reference. */
+     parent?: string;
+     /** Returns response with indentations and line breaks. */
+     prettyPrint?: boolean;
+     /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+     quotaUser?: string;
+     /** Upload protocol for media (e.g. "raw", "multipart"). */
+     upload_protocol?: string;
+     /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+     uploadType?: string;
+   }): Request<SummarizeSparkApplicationStagesResponse>;
+   /** Write wrapper objects from dataplane to spanner */
+   write(request: {
+     /** V1 error format. */
+     '$.xgafv'?: string;
+     /** OAuth access token. */
+     access_token?: string;
+     /** Data format for response. */
+     alt?: string;
+     /** JSONP */
+     callback?: string;
+     /** Selector specifying which fields to include in a partial response. */
+     fields?: string;
+     /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+     key?: string;
+     /** Required. The fully qualified name of the spark application to write data about in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" */
+     name: string;
+     /** OAuth 2.0 token for the current user. */
+     oauth_token?: string;
+     /** Returns response with indentations and line breaks. */
+     prettyPrint?: boolean;
+     /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+     quotaUser?: string;
+     /** Upload protocol for media (e.g. "raw", "multipart"). */
+     upload_protocol?: string;
+     /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+     uploadType?: string;
+     /** Request body */
+     resource: WriteSparkApplicationContextRequest;
+   }): Request<{}>;
+   write(
+     request: {
+       /** V1 error format. */
+       '$.xgafv'?: string;
+       /** OAuth access token. */
+       access_token?: string;
+       /** Data format for response. */
+       alt?: string;
+       /** JSONP */
+       callback?: string;
+       /** Selector specifying which fields to include in a partial response. */
+       fields?: string;
+       /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+       key?: string;
+       /** Required. The fully qualified name of the spark application to write data about in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" */
+       name: string;
+       /** OAuth 2.0 token for the current user. */
+       oauth_token?: string;
+       /** Returns response with indentations and line breaks. */
+       prettyPrint?: boolean;
+       /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+       quotaUser?: string;
+       /** Upload protocol for media (e.g. "raw", "multipart"). */
+       upload_protocol?: string;
+       /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+       uploadType?: string;
+     },
+     body: WriteSparkApplicationContextRequest
+   ): Request<{}>;
+ }
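
That closes the new SparkApplicationsResource: every method takes the fully qualified Spark application name plus the parent batch reference and returns a typed Request. A minimal usage sketch, assuming the resource is wired up as gapi.client.dataproc.projects.locations.batches.sparkApplications, which is how these generated clients usually expose nested resources; the project, region, batch, and application IDs below are placeholders:

    // Sketch only: the resource path and IDs are assumptions, not taken from this diff.
    const batchName = 'projects/my-project/locations/us-central1/batches/my-batch';
    const appName = `${batchName}/sparkApplications/app-0001`;
    const sparkApplications =
      gapi.client.dataproc.projects.locations.batches.sparkApplications;

    // High level application info.
    sparkApplications.access({name: appName, parent: batchName}).then(response => {
      console.log('application', response.result);
    });

    // First page of Spark jobs for the application.
    sparkApplications
      .searchJobs({name: appName, parent: batchName, pageSize: 50})
      .then(response => console.log('jobs', response.result));

The diff continues with the BatchesResource declarations, which are re-emitted below.
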
+ interface BatchesResource {
+   /** Analyze a Batch for possible recommendations and insights. */
+   analyze(request: {
+     /** V1 error format. */
+     '$.xgafv'?: string;
+     /** OAuth access token. */
+     access_token?: string;
+     /** Data format for response. */
+     alt?: string;
+     /** JSONP */
+     callback?: string;
+     /** Selector specifying which fields to include in a partial response. */
+     fields?: string;
+     /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+     key?: string;
+     /** Required. The fully qualified name of the batch to analyze in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID" */
+     name: string;
+     /** OAuth 2.0 token for the current user. */
+     oauth_token?: string;
+     /** Returns response with indentations and line breaks. */
+     prettyPrint?: boolean;
+     /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+     quotaUser?: string;
+     /** Upload protocol for media (e.g. "raw", "multipart"). */
+     upload_protocol?: string;
+     /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+     uploadType?: string;
+     /** Request body */
+     resource: AnalyzeBatchRequest;
+   }): Request<Operation>;
+   analyze(
+     request: {
+       /** V1 error format. */
+       '$.xgafv'?: string;
+       /** OAuth access token. */
+       access_token?: string;
+       /** Data format for response. */
+       alt?: string;
+       /** JSONP */
+       callback?: string;
+       /** Selector specifying which fields to include in a partial response. */
+       fields?: string;
+       /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+       key?: string;
+       /** Required. The fully qualified name of the batch to analyze in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID" */
+       name: string;
+       /** OAuth 2.0 token for the current user. */
+       oauth_token?: string;
+       /** Returns response with indentations and line breaks. */
+       prettyPrint?: boolean;
+       /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+       quotaUser?: string;
+       /** Upload protocol for media (e.g. "raw", "multipart"). */
+       upload_protocol?: string;
+       /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+       uploadType?: string;
+     },
+     body: AnalyzeBatchRequest
+   ): Request<Operation>;
+   /** Creates a batch workload that executes asynchronously. */
+   create(request: {
+     /** V1 error format. */
+     '$.xgafv'?: string;
+     /** OAuth access token. */
+     access_token?: string;
+     /** Data format for response. */
+     alt?: string;
+     /** Optional. The ID to use for the batch, which will become the final component of the batch's resource name.This value must be 4-63 characters. Valid characters are /[a-z][0-9]-/. */
+     batchId?: string;
+     /** JSONP */
+     callback?: string;
+     /** Selector specifying which fields to include in a partial response. */
+     fields?: string;
+     /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+     key?: string;
+     /** OAuth 2.0 token for the current user. */
+     oauth_token?: string;
+     /** Required. The parent resource where this batch will be created. */
+     parent: string;
+     /** Returns response with indentations and line breaks. */
+     prettyPrint?: boolean;
+     /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+     quotaUser?: string;
+     /** Optional. A unique ID used to identify the request. If the service receives two CreateBatchRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateBatchRequest)s with the same request_id, the second request is ignored and the Operation that corresponds to the first Batch created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The value must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters. */
+     requestId?: string;
+     /** Upload protocol for media (e.g. "raw", "multipart"). */
+     upload_protocol?: string;
+     /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+     uploadType?: string;
+     /** Request body */
+     resource: Batch;
+   }): Request<Operation>;
+   create(
+     request: {
+       /** V1 error format. */
+       '$.xgafv'?: string;
+       /** OAuth access token. */
+       access_token?: string;
+       /** Data format for response. */
+       alt?: string;
+       /** Optional. The ID to use for the batch, which will become the final component of the batch's resource name.This value must be 4-63 characters. Valid characters are /[a-z][0-9]-/. */
+       batchId?: string;
+       /** JSONP */
+       callback?: string;
+       /** Selector specifying which fields to include in a partial response. */
+       fields?: string;
+       /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+       key?: string;
+       /** OAuth 2.0 token for the current user. */
+       oauth_token?: string;
+       /** Required. The parent resource where this batch will be created. */
+       parent: string;
+       /** Returns response with indentations and line breaks. */
+       prettyPrint?: boolean;
+       /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+       quotaUser?: string;
+       /** Optional. A unique ID used to identify the request. If the service receives two CreateBatchRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateBatchRequest)s with the same request_id, the second request is ignored and the Operation that corresponds to the first Batch created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The value must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters. */
+       requestId?: string;
+       /** Upload protocol for media (e.g. "raw", "multipart"). */
+       upload_protocol?: string;
+       /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+       uploadType?: string;
+     },
+     body: Batch
+   ): Request<Operation>;
+   /** Deletes the batch workload resource. If the batch is not in a CANCELLED, SUCCEEDED or FAILED State, the delete operation fails and the response returns FAILED_PRECONDITION. */
+   delete(request?: {
+     /** V1 error format. */
+     '$.xgafv'?: string;
+     /** OAuth access token. */
+     access_token?: string;
+     /** Data format for response. */
+     alt?: string;
+     /** JSONP */
+     callback?: string;
+     /** Selector specifying which fields to include in a partial response. */
+     fields?: string;
+     /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+     key?: string;
+     /** Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID" */
|
|
3524
|
+
name: string;
|
|
3525
|
+
/** OAuth 2.0 token for the current user. */
|
|
3526
|
+
oauth_token?: string;
|
|
3527
|
+
/** Returns response with indentations and line breaks. */
|
|
3528
|
+
prettyPrint?: boolean;
|
|
3529
|
+
/** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
|
|
3530
|
+
quotaUser?: string;
|
|
3531
|
+
/** Upload protocol for media (e.g. "raw", "multipart"). */
|
|
3532
|
+
upload_protocol?: string;
|
|
3533
|
+
/** Legacy upload protocol for media (e.g. "media", "multipart"). */
|
|
3534
|
+
uploadType?: string;
|
|
3535
|
+
}): Request<{}>;
|
|
3536
|
+
/** Gets the batch workload resource representation. */
|
|
3537
|
+
get(request?: {
|
|
3538
|
+
/** V1 error format. */
|
|
3539
|
+
'$.xgafv'?: string;
|
|
3540
|
+
/** OAuth access token. */
|
|
3541
|
+
access_token?: string;
|
|
3542
|
+
/** Data format for response. */
|
|
3543
|
+
alt?: string;
|
|
3544
|
+
/** JSONP */
|
|
3545
|
+
callback?: string;
|
|
3546
|
+
/** Selector specifying which fields to include in a partial response. */
|
|
3547
|
+
fields?: string;
|
|
3548
|
+
/** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
|
|
3549
|
+
key?: string;
|
|
3550
|
+
/** Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID" */
|
|
3551
|
+
name: string;
|
|
3552
|
+
/** OAuth 2.0 token for the current user. */
|
|
3553
|
+
oauth_token?: string;
|
|
3554
|
+
/** Returns response with indentations and line breaks. */
|
|
3555
|
+
prettyPrint?: boolean;
|
|
3556
|
+
/** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
|
|
3557
|
+
quotaUser?: string;
|
|
3558
|
+
/** Upload protocol for media (e.g. "raw", "multipart"). */
|
|
3559
|
+
upload_protocol?: string;
|
|
3560
|
+
/** Legacy upload protocol for media (e.g. "media", "multipart"). */
|
|
3561
|
+
uploadType?: string;
|
|
3562
|
+
}): Request<Batch>;
|
|
3563
|
+
/** Lists batch workloads. */
|
|
3564
|
+
list(request?: {
|
|
3565
|
+
/** V1 error format. */
|
|
3566
|
+
'$.xgafv'?: string;
|
|
3567
|
+
/** OAuth access token. */
|
|
3568
|
+
access_token?: string;
|
|
3569
|
+
/** Data format for response. */
|
|
3570
|
+
alt?: string;
|
|
3571
|
+
/** JSONP */
|
|
3572
|
+
callback?: string;
|
|
3573
|
+
/** Selector specifying which fields to include in a partial response. */
|
|
3574
|
+
fields?: string;
|
|
3575
|
+
/** Optional. A filter for the batches to return in the response.A filter is a logical expression constraining the values of various fields in each batch resource. Filters are case sensitive, and may contain multiple clauses combined with logical operators (AND/OR). Supported fields are batch_id, batch_uuid, state, create_time, and labels.e.g. state = RUNNING and create_time < "2023-01-01T00:00:00Z" filters for batches in state RUNNING that were created before 2023-01-01. state = RUNNING and labels.environment=production filters for batches in state in a RUNNING state that have a production environment label.See https://google.aip.dev/assets/misc/ebnf-filtering.txt for a detailed description of the filter syntax and a list of supported comparisons. */
|
|
3576
|
+
filter?: string;
|
|
3577
|
+
/** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
|
|
3578
|
+
key?: string;
|
|
3579
|
+
/** OAuth 2.0 token for the current user. */
|
|
3580
|
+
oauth_token?: string;
|
|
3581
|
+
/** Optional. Field(s) on which to sort the list of batches.Currently the only supported sort orders are unspecified (empty) and create_time desc to sort by most recently created batches first.See https://google.aip.dev/132#ordering for more details. */
|
|
3582
|
+
orderBy?: string;
|
|
3583
|
+
/** Optional. The maximum number of batches to return in each response. The service may return fewer than this value. The default page size is 20; the maximum page size is 1000. */
|
|
3584
|
+
pageSize?: number;
|
|
3585
|
+
/** Optional. A page token received from a previous ListBatches call. Provide this token to retrieve the subsequent page. */
|
|
3586
|
+
pageToken?: string;
|
|
3587
|
+
/** Required. The parent, which owns this collection of batches. */
|
|
3588
|
+
parent: string;
|
|
3589
|
+
/** Returns response with indentations and line breaks. */
|
|
3590
|
+
prettyPrint?: boolean;
|
|
3591
|
+
/** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
|
|
3592
|
+
quotaUser?: string;
|
|
3593
|
+
/** Upload protocol for media (e.g. "raw", "multipart"). */
|
|
3594
|
+
upload_protocol?: string;
|
|
3595
|
+
/** Legacy upload protocol for media (e.g. "media", "multipart"). */
|
|
3596
|
+
uploadType?: string;
|
|
3597
|
+
}): Request<ListBatchesResponse>;
|
|
3598
|
+
sparkApplications: SparkApplicationsResource;
|
|
3599
|
+
}
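For orientation only (this note and sketch are not part of the published diff): a minimal usage sketch of the batch methods added above, assuming the discovery document is loaded via gapi.client.load and the batches resource is exposed at gapi.client.dataproc.projects.locations.batches, as declared elsewhere in this file. Project, region, bucket, and batch IDs are hypothetical placeholders, and the sparkBatch field names are taken from the Batch interface declared elsewhere in this package.

// Illustrative sketch; adjust identifiers to your own project.
gapi.client
  .load('https://dataproc.googleapis.com/$discovery/rest?version=v1')
  .then(() => {
    const batches = gapi.client.dataproc.projects.locations.batches;
    // Create a batch workload; the call resolves with a long-running Operation.
    batches.create(
      { parent: 'projects/my-project/locations/us-central1', batchId: 'example-batch-0001' },
      { sparkBatch: { mainClass: 'org.example.Main', jarFileUris: ['gs://my-bucket/app.jar'] } }
    );
    // List RUNNING batches, mirroring the filter example from the doc comment above.
    batches.list({
      parent: 'projects/my-project/locations/us-central1',
      filter: 'state = RUNNING and create_time < "2023-01-01T00:00:00Z"',
      pageSize: 20,
    });
  });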
+interface OperationsResource {
+  /** Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to Code.CANCELLED. */
+  cancel(request?: {
+    /** V1 error format. */
+    '$.xgafv'?: string;
+    /** OAuth access token. */
+    access_token?: string;
+    /** Data format for response. */
+    alt?: string;
+    /** JSONP */
+    callback?: string;
+    /** Selector specifying which fields to include in a partial response. */
+    fields?: string;
+    /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+    key?: string;
+    /** The name of the operation resource to be cancelled. */
+    name: string;
+    /** OAuth 2.0 token for the current user. */
+    oauth_token?: string;
+    /** Returns response with indentations and line breaks. */
+    prettyPrint?: boolean;
+    /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+    quotaUser?: string;
+    /** Upload protocol for media (e.g. "raw", "multipart"). */
+    upload_protocol?: string;
+    /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+    uploadType?: string;
+  }): Request<{}>;
+  /** Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. */
+  delete(request?: {
+    /** V1 error format. */
+    '$.xgafv'?: string;
+    /** OAuth access token. */
+    access_token?: string;
+    /** Data format for response. */
+    alt?: string;
+    /** JSONP */
+    callback?: string;
+    /** Selector specifying which fields to include in a partial response. */
+    fields?: string;
+    /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+    key?: string;
+    /** The name of the operation resource to be deleted. */
+    name: string;
+    /** OAuth 2.0 token for the current user. */
+    oauth_token?: string;
+    /** Returns response with indentations and line breaks. */
+    prettyPrint?: boolean;
+    /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+    quotaUser?: string;
+    /** Upload protocol for media (e.g. "raw", "multipart"). */
+    upload_protocol?: string;
+    /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+    uploadType?: string;
+  }): Request<{}>;
+  /** Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service. */
+  get(request?: {
+    /** V1 error format. */
+    '$.xgafv'?: string;
+    /** OAuth access token. */
+    access_token?: string;
+    /** Data format for response. */
+    alt?: string;
+    /** JSONP */
+    callback?: string;
+    /** Selector specifying which fields to include in a partial response. */
+    fields?: string;
+    /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+    key?: string;
+    /** The name of the operation resource. */
+    name: string;
+    /** OAuth 2.0 token for the current user. */
+    oauth_token?: string;
+    /** Returns response with indentations and line breaks. */
+    prettyPrint?: boolean;
+    /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+    quotaUser?: string;
+    /** Upload protocol for media (e.g. "raw", "multipart"). */
+    upload_protocol?: string;
+    /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+    uploadType?: string;
+  }): Request<Operation>;
+  /** Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED. */
+  list(request?: {
+    /** V1 error format. */
+    '$.xgafv'?: string;
+    /** OAuth access token. */
+    access_token?: string;
+    /** Data format for response. */
+    alt?: string;
+    /** JSONP */
+    callback?: string;
+    /** Selector specifying which fields to include in a partial response. */
+    fields?: string;
+    /** The standard list filter. */
+    filter?: string;
+    /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+    key?: string;
+    /** The name of the operation's parent resource. */
+    name: string;
+    /** OAuth 2.0 token for the current user. */
+    oauth_token?: string;
+    /** The standard list page size. */
+    pageSize?: number;
+    /** The standard list page token. */
+    pageToken?: string;
+    /** Returns response with indentations and line breaks. */
+    prettyPrint?: boolean;
+    /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+    quotaUser?: string;
+    /** Upload protocol for media (e.g. "raw", "multipart"). */
+    upload_protocol?: string;
+    /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+    uploadType?: string;
+  }): Request<ListOperationsResponse>;
+}
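For orientation only (not part of the published diff): a minimal polling sketch for the Operation returned by batches.create or batches.analyze, assuming the operations resource declared above is exposed at gapi.client.dataproc.projects.locations.operations; the done/response/error fields come from the standard long-running Operation shape declared elsewhere in this file.

// Illustrative sketch of polling a long-running operation until it completes.
const operations = gapi.client.dataproc.projects.locations.operations;
function waitForOperation(name: string): void {
  operations.get({ name }).then(({ result }) => {
    if (result.done) {
      console.log('operation finished', result.response ?? result.error);
    } else {
      // Not done yet; poll again after a short delay.
      setTimeout(() => waitForOperation(name), 10_000);
    }
  });
}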
+interface SparkApplicationsResource {
+  /** Obtain high level information corresponding to a single Spark Application. */
+  access(request?: {
+    /** V1 error format. */
+    '$.xgafv'?: string;
+    /** OAuth access token. */
+    access_token?: string;
+    /** Data format for response. */
+    alt?: string;
+    /** JSONP */
+    callback?: string;
+    /** Selector specifying which fields to include in a partial response. */
+    fields?: string;
+    /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+    key?: string;
+    /** Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" */
+    name: string;
+    /** OAuth 2.0 token for the current user. */
+    oauth_token?: string;
+    /** Required. Parent (Session) resource reference. */
+    parent?: string;
+    /** Returns response with indentations and line breaks. */
+    prettyPrint?: boolean;
+    /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+    quotaUser?: string;
+    /** Upload protocol for media (e.g. "raw", "multipart"). */
+    upload_protocol?: string;
+    /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+    uploadType?: string;
+  }): Request<AccessSessionSparkApplicationResponse>;
+  /** Obtain environment details for a Spark Application */
+  accessEnvironmentInfo(request?: {
+    /** V1 error format. */
+    '$.xgafv'?: string;
+    /** OAuth access token. */
+    access_token?: string;
+    /** Data format for response. */
+    alt?: string;
+    /** JSONP */
+    callback?: string;
+    /** Selector specifying which fields to include in a partial response. */
+    fields?: string;
+    /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+    key?: string;
+    /** Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" */
+    name: string;
+    /** OAuth 2.0 token for the current user. */
+    oauth_token?: string;
+    /** Required. Parent (Session) resource reference. */
+    parent?: string;
+    /** Returns response with indentations and line breaks. */
+    prettyPrint?: boolean;
+    /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+    quotaUser?: string;
+    /** Upload protocol for media (e.g. "raw", "multipart"). */
+    upload_protocol?: string;
+    /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+    uploadType?: string;
+  }): Request<AccessSessionSparkApplicationEnvironmentInfoResponse>;
+  /** Obtain data corresponding to a spark job for a Spark Application. */
+  accessJob(request?: {
+    /** V1 error format. */
+    '$.xgafv'?: string;
+    /** OAuth access token. */
+    access_token?: string;
+    /** Data format for response. */
+    alt?: string;
+    /** JSONP */
+    callback?: string;
+    /** Selector specifying which fields to include in a partial response. */
+    fields?: string;
+    /** Required. Job ID to fetch data for. */
+    jobId?: string;
+    /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+    key?: string;
+    /** Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" */
+    name: string;
+    /** OAuth 2.0 token for the current user. */
+    oauth_token?: string;
+    /** Required. Parent (Session) resource reference. */
+    parent?: string;
+    /** Returns response with indentations and line breaks. */
+    prettyPrint?: boolean;
+    /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+    quotaUser?: string;
+    /** Upload protocol for media (e.g. "raw", "multipart"). */
+    upload_protocol?: string;
+    /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+    uploadType?: string;
+  }): Request<AccessSessionSparkApplicationJobResponse>;
+  /** Obtain Spark Plan Graph for a Spark Application SQL execution. Limits the number of clusters returned as part of the graph to 10000. */
+  accessSqlPlan(request?: {
+    /** V1 error format. */
+    '$.xgafv'?: string;
+    /** OAuth access token. */
+    access_token?: string;
+    /** Data format for response. */
+    alt?: string;
+    /** JSONP */
+    callback?: string;
+    /** Required. Execution ID */
+    executionId?: string;
+    /** Selector specifying which fields to include in a partial response. */
+    fields?: string;
+    /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+    key?: string;
+    /** Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" */
+    name: string;
+    /** OAuth 2.0 token for the current user. */
+    oauth_token?: string;
+    /** Required. Parent (Session) resource reference. */
+    parent?: string;
+    /** Returns response with indentations and line breaks. */
+    prettyPrint?: boolean;
+    /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+    quotaUser?: string;
+    /** Upload protocol for media (e.g. "raw", "multipart"). */
+    upload_protocol?: string;
+    /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+    uploadType?: string;
+  }): Request<AccessSessionSparkApplicationSqlSparkPlanGraphResponse>;
+  /** Obtain data corresponding to a particular SQL Query for a Spark Application. */
+  accessSqlQuery(request?: {
+    /** V1 error format. */
+    '$.xgafv'?: string;
+    /** OAuth access token. */
+    access_token?: string;
+    /** Data format for response. */
+    alt?: string;
+    /** JSONP */
+    callback?: string;
+    /** Optional. Lists/ hides details of Spark plan nodes. True is set to list and false to hide. */
+    details?: boolean;
+    /** Required. Execution ID */
+    executionId?: string;
+    /** Selector specifying which fields to include in a partial response. */
+    fields?: string;
+    /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+    key?: string;
+    /** Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" */
+    name: string;
+    /** OAuth 2.0 token for the current user. */
+    oauth_token?: string;
+    /** Required. Parent (Session) resource reference. */
+    parent?: string;
+    /** Optional. Enables/ disables physical plan description on demand */
+    planDescription?: boolean;
+    /** Returns response with indentations and line breaks. */
+    prettyPrint?: boolean;
+    /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+    quotaUser?: string;
+    /** Upload protocol for media (e.g. "raw", "multipart"). */
+    upload_protocol?: string;
+    /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+    uploadType?: string;
+  }): Request<AccessSessionSparkApplicationSqlQueryResponse>;
+  /** Obtain data corresponding to a spark stage attempt for a Spark Application. */
+  accessStageAttempt(request?: {
+    /** V1 error format. */
+    '$.xgafv'?: string;
+    /** OAuth access token. */
+    access_token?: string;
+    /** Data format for response. */
+    alt?: string;
+    /** JSONP */
+    callback?: string;
+    /** Selector specifying which fields to include in a partial response. */
+    fields?: string;
+    /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+    key?: string;
+    /** Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" */
+    name: string;
+    /** OAuth 2.0 token for the current user. */
+    oauth_token?: string;
+    /** Required. Parent (Session) resource reference. */
+    parent?: string;
+    /** Returns response with indentations and line breaks. */
+    prettyPrint?: boolean;
+    /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+    quotaUser?: string;
+    /** Required. Stage Attempt ID */
+    stageAttemptId?: number;
+    /** Required. Stage ID */
+    stageId?: string;
+    /** Optional. The list of summary metrics fields to include. Empty list will default to skip all summary metrics fields. Example, if the response should include TaskQuantileMetrics, the request should have task_quantile_metrics in summary_metrics_mask field */
+    summaryMetricsMask?: string;
+    /** Upload protocol for media (e.g. "raw", "multipart"). */
+    upload_protocol?: string;
+    /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+    uploadType?: string;
+  }): Request<AccessSessionSparkApplicationStageAttemptResponse>;
+  /** Obtain RDD operation graph for a Spark Application Stage. Limits the number of clusters returned as part of the graph to 10000. */
+  accessStageRddGraph(request?: {
+    /** V1 error format. */
+    '$.xgafv'?: string;
+    /** OAuth access token. */
+    access_token?: string;
+    /** Data format for response. */
+    alt?: string;
+    /** JSONP */
+    callback?: string;
+    /** Selector specifying which fields to include in a partial response. */
+    fields?: string;
+    /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+    key?: string;
+    /** Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" */
+    name: string;
+    /** OAuth 2.0 token for the current user. */
+    oauth_token?: string;
+    /** Required. Parent (Session) resource reference. */
+    parent?: string;
+    /** Returns response with indentations and line breaks. */
+    prettyPrint?: boolean;
+    /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+    quotaUser?: string;
+    /** Required. Stage ID */
+    stageId?: string;
+    /** Upload protocol for media (e.g. "raw", "multipart"). */
+    upload_protocol?: string;
+    /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+    uploadType?: string;
+  }): Request<AccessSessionSparkApplicationStageRddOperationGraphResponse>;
+  /** Obtain high level information and list of Spark Applications corresponding to a batch */
+  search(request?: {
+    /** V1 error format. */
+    '$.xgafv'?: string;
+    /** OAuth access token. */
+    access_token?: string;
+    /** Data format for response. */
+    alt?: string;
+    /** Optional. Search only applications in the chosen state. */
+    applicationStatus?: string;
+    /** JSONP */
+    callback?: string;
+    /** Selector specifying which fields to include in a partial response. */
+    fields?: string;
+    /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+    key?: string;
+    /** Optional. Latest end timestamp to list. */
+    maxEndTime?: string;
+    /** Optional. Latest start timestamp to list. */
+    maxTime?: string;
+    /** Optional. Earliest end timestamp to list. */
+    minEndTime?: string;
+    /** Optional. Earliest start timestamp to list. */
+    minTime?: string;
+    /** OAuth 2.0 token for the current user. */
+    oauth_token?: string;
+    /** Optional. Maximum number of applications to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100. */
+    pageSize?: number;
+    /** Optional. A page token received from a previous SearchSessionSparkApplications call. Provide this token to retrieve the subsequent page. */
+    pageToken?: string;
+    /** Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID" */
+    parent: string;
+    /** Returns response with indentations and line breaks. */
+    prettyPrint?: boolean;
+    /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+    quotaUser?: string;
+    /** Upload protocol for media (e.g. "raw", "multipart"). */
+    upload_protocol?: string;
+    /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+    uploadType?: string;
+  }): Request<SearchSessionSparkApplicationsResponse>;
+  /** Obtain data corresponding to executors for a Spark Application. */
+  searchExecutors(request?: {
+    /** V1 error format. */
+    '$.xgafv'?: string;
+    /** OAuth access token. */
+    access_token?: string;
+    /** Data format for response. */
+    alt?: string;
+    /** JSONP */
+    callback?: string;
+    /** Optional. Filter to select whether active/ dead or all executors should be selected. */
+    executorStatus?: string;
+    /** Selector specifying which fields to include in a partial response. */
+    fields?: string;
+    /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+    key?: string;
+    /** Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" */
+    name: string;
+    /** OAuth 2.0 token for the current user. */
+    oauth_token?: string;
+    /** Optional. Maximum number of executors to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100. */
+    pageSize?: number;
+    /** Optional. A page token received from a previous SearchSessionSparkApplicationExecutors call. Provide this token to retrieve the subsequent page. */
+    pageToken?: string;
+    /** Required. Parent (Session) resource reference. */
+    parent?: string;
+    /** Returns response with indentations and line breaks. */
+    prettyPrint?: boolean;
+    /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+    quotaUser?: string;
+    /** Upload protocol for media (e.g. "raw", "multipart"). */
+    upload_protocol?: string;
+    /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+    uploadType?: string;
+  }): Request<SearchSessionSparkApplicationExecutorsResponse>;
+  /** Obtain executor summary with respect to a spark stage attempt. */
+  searchExecutorStageSummary(request?: {
+    /** V1 error format. */
+    '$.xgafv'?: string;
+    /** OAuth access token. */
+    access_token?: string;
+    /** Data format for response. */
+    alt?: string;
+    /** JSONP */
+    callback?: string;
+    /** Selector specifying which fields to include in a partial response. */
+    fields?: string;
+    /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+    key?: string;
+    /** Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" */
+    name: string;
+    /** OAuth 2.0 token for the current user. */
+    oauth_token?: string;
+    /** Optional. Maximum number of executors to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100. */
+    pageSize?: number;
+    /** Optional. A page token received from a previous SearchSessionSparkApplicationExecutorStageSummary call. Provide this token to retrieve the subsequent page. */
+    pageToken?: string;
+    /** Required. Parent (Session) resource reference. */
+    parent?: string;
+    /** Returns response with indentations and line breaks. */
+    prettyPrint?: boolean;
+    /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+    quotaUser?: string;
+    /** Required. Stage Attempt ID */
+    stageAttemptId?: number;
+    /** Required. Stage ID */
+    stageId?: string;
+    /** Upload protocol for media (e.g. "raw", "multipart"). */
+    upload_protocol?: string;
+    /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+    uploadType?: string;
+  }): Request<SearchSessionSparkApplicationExecutorStageSummaryResponse>;
+  /** Obtain list of spark jobs corresponding to a Spark Application. */
+  searchJobs(request?: {
+    /** V1 error format. */
+    '$.xgafv'?: string;
+    /** OAuth access token. */
+    access_token?: string;
+    /** Data format for response. */
+    alt?: string;
+    /** JSONP */
+    callback?: string;
+    /** Selector specifying which fields to include in a partial response. */
+    fields?: string;
+    /** Optional. List only jobs in the specific state. */
+    jobStatus?: string;
+    /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+    key?: string;
+    /** Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" */
+    name: string;
+    /** OAuth 2.0 token for the current user. */
+    oauth_token?: string;
+    /** Optional. Maximum number of jobs to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100. */
+    pageSize?: number;
+    /** Optional. A page token received from a previous SearchSessionSparkApplicationJobs call. Provide this token to retrieve the subsequent page. */
+    pageToken?: string;
+    /** Required. Parent (Session) resource reference. */
+    parent?: string;
+    /** Returns response with indentations and line breaks. */
+    prettyPrint?: boolean;
+    /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+    quotaUser?: string;
+    /** Upload protocol for media (e.g. "raw", "multipart"). */
+    upload_protocol?: string;
+    /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+    uploadType?: string;
+  }): Request<SearchSessionSparkApplicationJobsResponse>;
+  /** Obtain data corresponding to SQL Queries for a Spark Application. */
+  searchSqlQueries(request?: {
+    /** V1 error format. */
+    '$.xgafv'?: string;
+    /** OAuth access token. */
+    access_token?: string;
+    /** Data format for response. */
+    alt?: string;
+    /** JSONP */
+    callback?: string;
+    /** Optional. Lists/ hides details of Spark plan nodes. True is set to list and false to hide. */
+    details?: boolean;
+    /** Selector specifying which fields to include in a partial response. */
+    fields?: string;
+    /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+    key?: string;
+    /** Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" */
+    name: string;
+    /** OAuth 2.0 token for the current user. */
+    oauth_token?: string;
+    /** Optional. Maximum number of queries to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100. */
+    pageSize?: number;
+    /** Optional. A page token received from a previous SearchSessionSparkApplicationSqlQueries call. Provide this token to retrieve the subsequent page. */
+    pageToken?: string;
+    /** Required. Parent (Session) resource reference. */
+    parent?: string;
+    /** Optional. Enables/ disables physical plan description on demand */
+    planDescription?: boolean;
+    /** Returns response with indentations and line breaks. */
+    prettyPrint?: boolean;
+    /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+    quotaUser?: string;
+    /** Upload protocol for media (e.g. "raw", "multipart"). */
+    upload_protocol?: string;
+    /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+    uploadType?: string;
+  }): Request<SearchSessionSparkApplicationSqlQueriesResponse>;
+  /** Obtain data corresponding to a spark stage attempts for a Spark Application. */
+  searchStageAttempts(request?: {
+    /** V1 error format. */
+    '$.xgafv'?: string;
+    /** OAuth access token. */
+    access_token?: string;
+    /** Data format for response. */
+    alt?: string;
+    /** JSONP */
+    callback?: string;
+    /** Selector specifying which fields to include in a partial response. */
+    fields?: string;
+    /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+    key?: string;
+    /** Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" */
+    name: string;
+    /** OAuth 2.0 token for the current user. */
+    oauth_token?: string;
+    /** Optional. Maximum number of stage attempts (paging based on stage_attempt_id) to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100. */
+    pageSize?: number;
+    /** Optional. A page token received from a previous SearchSessionSparkApplicationStageAttempts call. Provide this token to retrieve the subsequent page. */
+    pageToken?: string;
+    /** Required. Parent (Session) resource reference. */
+    parent?: string;
+    /** Returns response with indentations and line breaks. */
+    prettyPrint?: boolean;
+    /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+    quotaUser?: string;
+    /** Required. Stage ID for which attempts are to be fetched */
+    stageId?: string;
+    /** Optional. The list of summary metrics fields to include. Empty list will default to skip all summary metrics fields. Example, if the response should include TaskQuantileMetrics, the request should have task_quantile_metrics in summary_metrics_mask field */
+    summaryMetricsMask?: string;
+    /** Upload protocol for media (e.g. "raw", "multipart"). */
+    upload_protocol?: string;
+    /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+    uploadType?: string;
+  }): Request<SearchSessionSparkApplicationStageAttemptsResponse>;
+  /** Obtain data corresponding to tasks for a spark stage attempt for a Spark Application. */
+  searchStageAttemptTasks(request?: {
+    /** V1 error format. */
+    '$.xgafv'?: string;
+    /** OAuth access token. */
+    access_token?: string;
+    /** Data format for response. */
+    alt?: string;
+    /** JSONP */
+    callback?: string;
+    /** Selector specifying which fields to include in a partial response. */
+    fields?: string;
+    /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+    key?: string;
+    /** Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" */
+    name: string;
+    /** OAuth 2.0 token for the current user. */
+    oauth_token?: string;
+    /** Optional. Maximum number of tasks to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100. */
+    pageSize?: number;
+    /** Optional. A page token received from a previous SearchSessionSparkApplicationStageAttemptTasks call. Provide this token to retrieve the subsequent page. */
+    pageToken?: string;
+    /** Required. Parent (Session) resource reference. */
+    parent?: string;
+    /** Returns response with indentations and line breaks. */
+    prettyPrint?: boolean;
+    /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+    quotaUser?: string;
+    /** Optional. Sort the tasks by runtime. */
+    sortRuntime?: boolean;
+    /** Optional. Stage Attempt ID */
+    stageAttemptId?: number;
+    /** Optional. Stage ID */
+    stageId?: string;
+    /** Optional. List only tasks in the state. */
+    taskStatus?: string;
+    /** Upload protocol for media (e.g. "raw", "multipart"). */
+    upload_protocol?: string;
+    /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+    uploadType?: string;
+  }): Request<SearchSessionSparkApplicationStageAttemptTasksResponse>;
+  /** Obtain data corresponding to stages for a Spark Application. */
+  searchStages(request?: {
+    /** V1 error format. */
+    '$.xgafv'?: string;
+    /** OAuth access token. */
+    access_token?: string;
+    /** Data format for response. */
+    alt?: string;
+    /** JSONP */
+    callback?: string;
+    /** Selector specifying which fields to include in a partial response. */
+    fields?: string;
+    /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+    key?: string;
+    /** Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" */
+    name: string;
+    /** OAuth 2.0 token for the current user. */
+    oauth_token?: string;
+    /** Optional. Maximum number of stages (paging based on stage_id) to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100. */
+    pageSize?: number;
+    /** Optional. A page token received from a previous SearchSessionSparkApplicationStages call. Provide this token to retrieve the subsequent page. */
+    pageToken?: string;
+    /** Required. Parent (Session) resource reference. */
+    parent?: string;
+    /** Returns response with indentations and line breaks. */
+    prettyPrint?: boolean;
+    /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+    quotaUser?: string;
+    /** Optional. List only stages in the given state. */
+    stageStatus?: string;
+    /** Optional. The list of summary metrics fields to include. Empty list will default to skip all summary metrics fields. Example, if the response should include TaskQuantileMetrics, the request should have task_quantile_metrics in summary_metrics_mask field */
+    summaryMetricsMask?: string;
+    /** Upload protocol for media (e.g. "raw", "multipart"). */
+    upload_protocol?: string;
+    /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+    uploadType?: string;
+  }): Request<SearchSessionSparkApplicationStagesResponse>;
+  /** Obtain summary of Executor Summary for a Spark Application */
+  summarizeExecutors(request?: {
+    /** V1 error format. */
+    '$.xgafv'?: string;
+    /** OAuth access token. */
+    access_token?: string;
+    /** Data format for response. */
+    alt?: string;
+    /** JSONP */
+    callback?: string;
+    /** Selector specifying which fields to include in a partial response. */
+    fields?: string;
+    /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+    key?: string;
+    /** Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" */
+    name: string;
+    /** OAuth 2.0 token for the current user. */
+    oauth_token?: string;
+    /** Required. Parent (Session) resource reference. */
+    parent?: string;
+    /** Returns response with indentations and line breaks. */
+    prettyPrint?: boolean;
+    /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+    quotaUser?: string;
+    /** Upload protocol for media (e.g. "raw", "multipart"). */
+    upload_protocol?: string;
+    /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+    uploadType?: string;
+  }): Request<SummarizeSessionSparkApplicationExecutorsResponse>;
+  /** Obtain summary of Jobs for a Spark Application */
+  summarizeJobs(request?: {
+    /** V1 error format. */
+    '$.xgafv'?: string;
+    /** OAuth access token. */
+    access_token?: string;
+    /** Data format for response. */
+    alt?: string;
+    /** JSONP */
+    callback?: string;
+    /** Selector specifying which fields to include in a partial response. */
+    fields?: string;
+    /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+    key?: string;
+    /** Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" */
+    name: string;
+    /** OAuth 2.0 token for the current user. */
+    oauth_token?: string;
+    /** Required. Parent (Session) resource reference. */
+    parent?: string;
+    /** Returns response with indentations and line breaks. */
+    prettyPrint?: boolean;
+    /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+    quotaUser?: string;
+    /** Upload protocol for media (e.g. "raw", "multipart"). */
+    upload_protocol?: string;
+    /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+    uploadType?: string;
+  }): Request<SummarizeSessionSparkApplicationJobsResponse>;
+  /** Obtain summary of Tasks for a Spark Application Stage Attempt */
+  summarizeStageAttemptTasks(request?: {
+    /** V1 error format. */
+    '$.xgafv'?: string;
+    /** OAuth access token. */
+    access_token?: string;
+    /** Data format for response. */
+    alt?: string;
+    /** JSONP */
+    callback?: string;
+    /** Selector specifying which fields to include in a partial response. */
+    fields?: string;
+    /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+    key?: string;
+    /** Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" */
+    name: string;
+    /** OAuth 2.0 token for the current user. */
+    oauth_token?: string;
+    /** Required. Parent (Session) resource reference. */
+    parent?: string;
+    /** Returns response with indentations and line breaks. */
+    prettyPrint?: boolean;
+    /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+    quotaUser?: string;
+    /** Required. Stage Attempt ID */
+    stageAttemptId?: number;
+    /** Required. Stage ID */
+    stageId?: string;
+    /** Upload protocol for media (e.g. "raw", "multipart"). */
+    upload_protocol?: string;
+    /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+    uploadType?: string;
+  }): Request<SummarizeSessionSparkApplicationStageAttemptTasksResponse>;
+  /** Obtain summary of Stages for a Spark Application */
+  summarizeStages(request?: {
+    /** V1 error format. */
+    '$.xgafv'?: string;
+    /** OAuth access token. */
+    access_token?: string;
+    /** Data format for response. */
+    alt?: string;
+    /** JSONP */
+    callback?: string;
+    /** Selector specifying which fields to include in a partial response. */
+    fields?: string;
+    /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+    key?: string;
+    /** Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" */
+    name: string;
+    /** OAuth 2.0 token for the current user. */
+    oauth_token?: string;
+    /** Required. Parent (Session) resource reference. */
+    parent?: string;
+    /** Returns response with indentations and line breaks. */
+    prettyPrint?: boolean;
+    /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+    quotaUser?: string;
+    /** Upload protocol for media (e.g. "raw", "multipart"). */
+    upload_protocol?: string;
+    /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+    uploadType?: string;
+  }): Request<SummarizeSessionSparkApplicationStagesResponse>;
+  /** Write wrapper objects from dataplane to spanner */
+  write(request: {
+    /** V1 error format. */
+    '$.xgafv'?: string;
+    /** OAuth access token. */
+    access_token?: string;
+    /** Data format for response. */
+    alt?: string;
+    /** JSONP */
+    callback?: string;
+    /** Selector specifying which fields to include in a partial response. */
+    fields?: string;
+    /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+    key?: string;
+    /** Required. The fully qualified name of the spark application to write data about in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" */
+    name: string;
+    /** OAuth 2.0 token for the current user. */
+    oauth_token?: string;
+    /** Returns response with indentations and line breaks. */
+    prettyPrint?: boolean;
+    /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+    quotaUser?: string;
+    /** Upload protocol for media (e.g. "raw", "multipart"). */
+    upload_protocol?: string;
+    /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+    uploadType?: string;
+    /** Request body */
+    resource: WriteSessionSparkApplicationContextRequest;
+  }): Request<{}>;
+  write(
+    request: {
+      /** V1 error format. */
+      '$.xgafv'?: string;
+      /** OAuth access token. */
+      access_token?: string;
+      /** Data format for response. */
+      alt?: string;
+      /** JSONP */
+      callback?: string;
+      /** Selector specifying which fields to include in a partial response. */
+      fields?: string;
+      /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
+      key?: string;
+      /** Required. The fully qualified name of the spark application to write data about in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" */
+      name: string;
+      /** OAuth 2.0 token for the current user. */
+      oauth_token?: string;
+      /** Returns response with indentations and line breaks. */
+      prettyPrint?: boolean;
+      /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
+      quotaUser?: string;
+      /** Upload protocol for media (e.g. "raw", "multipart"). */
+      upload_protocol?: string;
+      /** Legacy upload protocol for media (e.g. "media", "multipart"). */
+      uploadType?: string;
+    },
+    body: WriteSessionSparkApplicationContextRequest
+  ): Request<{}>;
 }
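For orientation only (not part of the published diff): a minimal sketch of querying the Spark applications of a session through the SparkApplicationsResource declared above. The diff below adds a sparkApplications property to SessionsResource, so the resource is reachable from sessions; the exact nesting path gapi.client.dataproc.projects.locations.sessions.sparkApplications, and the project, region, and session IDs, are assumptions for illustration.

// Illustrative sketch; the search call takes the session name as parent.
const sparkApps = gapi.client.dataproc.projects.locations.sessions.sparkApplications;
sparkApps
  .search({
    parent: 'projects/my-project/locations/us-central1/sessions/my-session',
    pageSize: 10,
  })
  .then(({ result }) => console.log(result)); // SearchSessionSparkApplicationsResponse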
 interface SessionsResource {
   /** Create an interactive session asynchronously. */
@@ -2345,6 +4628,7 @@ declare namespace gapi.client {
     },
     body: TerminateSessionRequest
   ): Request<Operation>;
+  sparkApplications: SparkApplicationsResource;
 }
 interface SessionTemplatesResource {
   /** Create a session template synchronously. */