google-cloud-dataproc-v1 1.1.0 → 1.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their public registries.
Files changed (58)
  1. checksums.yaml +4 -4
  2. data/README.md +31 -21
  3. data/lib/google/cloud/dataproc/v1/autoscaling_policy_service/client.rb +36 -6
  4. data/lib/google/cloud/dataproc/v1/autoscaling_policy_service/rest/client.rb +36 -6
  5. data/lib/google/cloud/dataproc/v1/autoscaling_policy_service/rest/service_stub.rb +54 -32
  6. data/lib/google/cloud/dataproc/v1/batch_controller/client.rb +37 -5
  7. data/lib/google/cloud/dataproc/v1/batch_controller/operations.rb +19 -15
  8. data/lib/google/cloud/dataproc/v1/batch_controller/rest/client.rb +37 -5
  9. data/lib/google/cloud/dataproc/v1/batch_controller/rest/operations.rb +50 -38
  10. data/lib/google/cloud/dataproc/v1/batch_controller/rest/service_stub.rb +46 -26
  11. data/lib/google/cloud/dataproc/v1/batches_pb.rb +1 -1
  12. data/lib/google/cloud/dataproc/v1/cluster_controller/client.rb +49 -16
  13. data/lib/google/cloud/dataproc/v1/cluster_controller/operations.rb +19 -15
  14. data/lib/google/cloud/dataproc/v1/cluster_controller/paths.rb +21 -0
  15. data/lib/google/cloud/dataproc/v1/cluster_controller/rest/client.rb +49 -16
  16. data/lib/google/cloud/dataproc/v1/cluster_controller/rest/operations.rb +50 -38
  17. data/lib/google/cloud/dataproc/v1/cluster_controller/rest/service_stub.rb +78 -50
  18. data/lib/google/cloud/dataproc/v1/clusters_pb.rb +2 -1
  19. data/lib/google/cloud/dataproc/v1/job_controller/client.rb +37 -8
  20. data/lib/google/cloud/dataproc/v1/job_controller/operations.rb +19 -15
  21. data/lib/google/cloud/dataproc/v1/job_controller/rest/client.rb +37 -8
  22. data/lib/google/cloud/dataproc/v1/job_controller/rest/operations.rb +50 -38
  23. data/lib/google/cloud/dataproc/v1/job_controller/rest/service_stub.rb +70 -44
  24. data/lib/google/cloud/dataproc/v1/jobs_pb.rb +2 -1
  25. data/lib/google/cloud/dataproc/v1/node_group_controller/client.rb +37 -4
  26. data/lib/google/cloud/dataproc/v1/node_group_controller/operations.rb +19 -15
  27. data/lib/google/cloud/dataproc/v1/node_group_controller/rest/client.rb +37 -4
  28. data/lib/google/cloud/dataproc/v1/node_group_controller/rest/operations.rb +50 -38
  29. data/lib/google/cloud/dataproc/v1/node_group_controller/rest/service_stub.rb +38 -20
  30. data/lib/google/cloud/dataproc/v1/session_controller/client.rb +43 -9
  31. data/lib/google/cloud/dataproc/v1/session_controller/operations.rb +19 -15
  32. data/lib/google/cloud/dataproc/v1/session_controller/rest/client.rb +43 -9
  33. data/lib/google/cloud/dataproc/v1/session_controller/rest/operations.rb +50 -38
  34. data/lib/google/cloud/dataproc/v1/session_controller/rest/service_stub.rb +54 -32
  35. data/lib/google/cloud/dataproc/v1/session_template_controller/client.rb +36 -6
  36. data/lib/google/cloud/dataproc/v1/session_template_controller/rest/client.rb +36 -6
  37. data/lib/google/cloud/dataproc/v1/session_template_controller/rest/service_stub.rb +54 -32
  38. data/lib/google/cloud/dataproc/v1/session_templates_pb.rb +1 -1
  39. data/lib/google/cloud/dataproc/v1/sessions_pb.rb +2 -1
  40. data/lib/google/cloud/dataproc/v1/shared_pb.rb +3 -1
  41. data/lib/google/cloud/dataproc/v1/version.rb +1 -1
  42. data/lib/google/cloud/dataproc/v1/workflow_template_service/client.rb +38 -8
  43. data/lib/google/cloud/dataproc/v1/workflow_template_service/operations.rb +19 -15
  44. data/lib/google/cloud/dataproc/v1/workflow_template_service/paths.rb +21 -0
  45. data/lib/google/cloud/dataproc/v1/workflow_template_service/rest/client.rb +38 -8
  46. data/lib/google/cloud/dataproc/v1/workflow_template_service/rest/operations.rb +50 -38
  47. data/lib/google/cloud/dataproc/v1/workflow_template_service/rest/service_stub.rb +70 -44
  48. data/lib/google/cloud/dataproc/v1/workflow_templates_pb.rb +2 -1
  49. data/proto_docs/google/api/client.rb +53 -0
  50. data/proto_docs/google/cloud/dataproc/v1/batches.rb +17 -0
  51. data/proto_docs/google/cloud/dataproc/v1/clusters.rb +111 -23
  52. data/proto_docs/google/cloud/dataproc/v1/jobs.rb +130 -23
  53. data/proto_docs/google/cloud/dataproc/v1/session_templates.rb +7 -0
  54. data/proto_docs/google/cloud/dataproc/v1/sessions.rb +17 -3
  55. data/proto_docs/google/cloud/dataproc/v1/shared.rb +42 -4
  56. data/proto_docs/google/cloud/dataproc/v1/workflow_templates.rb +79 -0
  57. data/proto_docs/google/longrunning/operations.rb +23 -14
  58. metadata +6 -9
data/proto_docs/google/cloud/dataproc/v1/jobs.rb

@@ -24,7 +24,7 @@ module Google
   # The runtime logging config of the job.
   # @!attribute [rw] driver_log_levels
   # @return [::Google::Protobuf::Map{::String => ::Google::Cloud::Dataproc::V1::LoggingConfig::Level}]
- # The per-package log levels for the driver. This may include
+ # The per-package log levels for the driver. This can include
   # "root" package name to configure rootLogger.
   # Examples:
   # - 'com.google = FATAL'
@@ -88,15 +88,19 @@ module Google
   # 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar'
   # 'hdfs:/tmp/test-samples/custom-wordcount.jar'
   # 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
+ #
+ # Note: The following fields are mutually exclusive: `main_jar_file_uri`, `main_class`. If a field in that set is populated, all other fields in the set will automatically be cleared.
   # @!attribute [rw] main_class
   # @return [::String]
   # The name of the driver's main class. The jar file containing the class
   # must be in the default CLASSPATH or specified in `jar_file_uris`.
+ #
+ # Note: The following fields are mutually exclusive: `main_class`, `main_jar_file_uri`. If a field in that set is populated, all other fields in the set will automatically be cleared.
   # @!attribute [rw] args
   # @return [::Array<::String>]
   # Optional. The arguments to pass to the driver. Do not
   # include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as
- # job properties, since a collision may occur that causes an incorrect job
+ # job properties, since a collision might occur that causes an incorrect job
   # submission.
   # @!attribute [rw] jar_file_uris
   # @return [::Array<::String>]
@@ -115,7 +119,7 @@ module Google
   # @!attribute [rw] properties
   # @return [::Google::Protobuf::Map{::String => ::String}]
   # Optional. A mapping of property names to values, used to configure Hadoop.
- # Properties that conflict with values set by the Dataproc API may be
+ # Properties that conflict with values set by the Dataproc API might be
   # overwritten. Can include properties set in `/etc/hadoop/conf/*-site` and
   # classes in user code.
   # @!attribute [rw] logging_config
@@ -140,10 +144,15 @@ module Google
   # @!attribute [rw] main_jar_file_uri
   # @return [::String]
   # The HCFS URI of the jar file that contains the main class.
+ #
+ # Note: The following fields are mutually exclusive: `main_jar_file_uri`, `main_class`. If a field in that set is populated, all other fields in the set will automatically be cleared.
   # @!attribute [rw] main_class
   # @return [::String]
   # The name of the driver's main class. The jar file that contains the class
- # must be in the default CLASSPATH or specified in `jar_file_uris`.
+ # must be in the default CLASSPATH or specified in
+ # SparkJob.jar_file_uris.
+ #
+ # Note: The following fields are mutually exclusive: `main_class`, `main_jar_file_uri`. If a field in that set is populated, all other fields in the set will automatically be cleared.
   # @!attribute [rw] args
   # @return [::Array<::String>]
   # Optional. The arguments to pass to the driver. Do not include arguments,
@@ -165,7 +174,7 @@ module Google
   # @!attribute [rw] properties
   # @return [::Google::Protobuf::Map{::String => ::String}]
   # Optional. A mapping of property names to values, used to configure Spark.
- # Properties that conflict with values set by the Dataproc API may be
+ # Properties that conflict with values set by the Dataproc API might be
   # overwritten. Can include properties set in
   # /etc/spark/conf/spark-defaults.conf and classes in user code.
   # @!attribute [rw] logging_config
@@ -218,7 +227,7 @@ module Google
   # @!attribute [rw] properties
   # @return [::Google::Protobuf::Map{::String => ::String}]
   # Optional. A mapping of property names to values, used to configure PySpark.
- # Properties that conflict with values set by the Dataproc API may be
+ # Properties that conflict with values set by the Dataproc API might be
   # overwritten. Can include properties set in
   # /etc/spark/conf/spark-defaults.conf and classes in user code.
   # @!attribute [rw] logging_config
@@ -265,9 +274,13 @@ module Google
   # @!attribute [rw] query_file_uri
   # @return [::String]
   # The HCFS URI of the script that contains Hive queries.
+ #
+ # Note: The following fields are mutually exclusive: `query_file_uri`, `query_list`. If a field in that set is populated, all other fields in the set will automatically be cleared.
   # @!attribute [rw] query_list
   # @return [::Google::Cloud::Dataproc::V1::QueryList]
   # A list of queries.
+ #
+ # Note: The following fields are mutually exclusive: `query_list`, `query_file_uri`. If a field in that set is populated, all other fields in the set will automatically be cleared.
   # @!attribute [rw] continue_on_failure
   # @return [::Boolean]
   # Optional. Whether to continue executing queries if a query fails.
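
A note on how these "mutually exclusive" annotations behave in the generated Ruby classes: `query_file_uri` and `query_list` are members of a proto oneof, so assigning one clears the other. A minimal sketch (the bucket path and query text are placeholders):

    require "google/cloud/dataproc/v1"

    # Build a HiveJob with an inline query list.
    hive_job = Google::Cloud::Dataproc::V1::HiveJob.new(
      query_list: Google::Cloud::Dataproc::V1::QueryList.new(
        queries: ["SHOW DATABASES;", "SELECT COUNT(*) FROM web_logs;"]
      )
    )

    # `query_file_uri` shares a oneof with `query_list`, so this assignment
    # clears `query_list`, matching the note above.
    hive_job.query_file_uri = "gs://my-bucket/queries/report.hql"
    hive_job.query_list # => nil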
@@ -280,7 +293,7 @@ module Google
   # @!attribute [rw] properties
   # @return [::Google::Protobuf::Map{::String => ::String}]
   # Optional. A mapping of property names and values, used to configure Hive.
- # Properties that conflict with values set by the Dataproc API may be
+ # Properties that conflict with values set by the Dataproc API might be
   # overwritten. Can include properties set in `/etc/hadoop/conf/*-site.xml`,
   # /etc/hive/conf/hive-site.xml, and classes in user code.
   # @!attribute [rw] jar_file_uris
@@ -316,9 +329,13 @@ module Google
   # @!attribute [rw] query_file_uri
   # @return [::String]
   # The HCFS URI of the script that contains SQL queries.
+ #
+ # Note: The following fields are mutually exclusive: `query_file_uri`, `query_list`. If a field in that set is populated, all other fields in the set will automatically be cleared.
   # @!attribute [rw] query_list
   # @return [::Google::Cloud::Dataproc::V1::QueryList]
   # A list of queries.
+ #
+ # Note: The following fields are mutually exclusive: `query_list`, `query_file_uri`. If a field in that set is populated, all other fields in the set will automatically be cleared.
   # @!attribute [rw] script_variables
   # @return [::Google::Protobuf::Map{::String => ::String}]
   # Optional. Mapping of query variable names to values (equivalent to the
@@ -327,7 +344,7 @@ module Google
   # @return [::Google::Protobuf::Map{::String => ::String}]
   # Optional. A mapping of property names to values, used to configure
   # Spark SQL's SparkConf. Properties that conflict with values set by the
- # Dataproc API may be overwritten.
+ # Dataproc API might be overwritten.
   # @!attribute [rw] jar_file_uris
   # @return [::Array<::String>]
   # Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
@@ -362,9 +379,13 @@ module Google
   # @!attribute [rw] query_file_uri
   # @return [::String]
   # The HCFS URI of the script that contains the Pig queries.
+ #
+ # Note: The following fields are mutually exclusive: `query_file_uri`, `query_list`. If a field in that set is populated, all other fields in the set will automatically be cleared.
   # @!attribute [rw] query_list
   # @return [::Google::Cloud::Dataproc::V1::QueryList]
   # A list of queries.
+ #
+ # Note: The following fields are mutually exclusive: `query_list`, `query_file_uri`. If a field in that set is populated, all other fields in the set will automatically be cleared.
   # @!attribute [rw] continue_on_failure
   # @return [::Boolean]
   # Optional. Whether to continue executing queries if a query fails.
@@ -377,7 +398,7 @@ module Google
   # @!attribute [rw] properties
   # @return [::Google::Protobuf::Map{::String => ::String}]
   # Optional. A mapping of property names to values, used to configure Pig.
- # Properties that conflict with values set by the Dataproc API may be
+ # Properties that conflict with values set by the Dataproc API might be
   # overwritten. Can include properties set in `/etc/hadoop/conf/*-site.xml`,
   # /etc/pig/conf/pig.properties, and classes in user code.
   # @!attribute [rw] jar_file_uris
@@ -434,7 +455,7 @@ module Google
   # @!attribute [rw] properties
   # @return [::Google::Protobuf::Map{::String => ::String}]
   # Optional. A mapping of property names to values, used to configure SparkR.
- # Properties that conflict with values set by the Dataproc API may be
+ # Properties that conflict with values set by the Dataproc API might be
   # overwritten. Can include properties set in
   # /etc/spark/conf/spark-defaults.conf and classes in user code.
   # @!attribute [rw] logging_config
@@ -462,9 +483,13 @@ module Google
   # @!attribute [rw] query_file_uri
   # @return [::String]
   # The HCFS URI of the script that contains SQL queries.
+ #
+ # Note: The following fields are mutually exclusive: `query_file_uri`, `query_list`. If a field in that set is populated, all other fields in the set will automatically be cleared.
   # @!attribute [rw] query_list
   # @return [::Google::Cloud::Dataproc::V1::QueryList]
   # A list of queries.
+ #
+ # Note: The following fields are mutually exclusive: `query_list`, `query_file_uri`. If a field in that set is populated, all other fields in the set will automatically be cleared.
   # @!attribute [rw] continue_on_failure
   # @return [::Boolean]
   # Optional. Whether to continue executing queries if a query fails.
@@ -507,9 +532,13 @@ module Google
   # @!attribute [rw] query_file_uri
   # @return [::String]
   # The HCFS URI of the script that contains SQL queries.
+ #
+ # Note: The following fields are mutually exclusive: `query_file_uri`, `query_list`. If a field in that set is populated, all other fields in the set will automatically be cleared.
   # @!attribute [rw] query_list
   # @return [::Google::Cloud::Dataproc::V1::QueryList]
   # A list of queries.
+ #
+ # Note: The following fields are mutually exclusive: `query_list`, `query_file_uri`. If a field in that set is populated, all other fields in the set will automatically be cleared.
   # @!attribute [rw] continue_on_failure
   # @return [::Boolean]
   # Optional. Whether to continue executing queries if a query fails.
@@ -544,6 +573,55 @@ module Google
   end
   end

+ # A Dataproc job for running Apache Flink applications on YARN.
+ # @!attribute [rw] main_jar_file_uri
+ # @return [::String]
+ # The HCFS URI of the jar file that contains the main class.
+ #
+ # Note: The following fields are mutually exclusive: `main_jar_file_uri`, `main_class`. If a field in that set is populated, all other fields in the set will automatically be cleared.
+ # @!attribute [rw] main_class
+ # @return [::String]
+ # The name of the driver's main class. The jar file that contains the class
+ # must be in the default CLASSPATH or specified in
+ # {::Google::Cloud::Dataproc::V1::FlinkJob#jar_file_uris jarFileUris}.
+ #
+ # Note: The following fields are mutually exclusive: `main_class`, `main_jar_file_uri`. If a field in that set is populated, all other fields in the set will automatically be cleared.
+ # @!attribute [rw] args
+ # @return [::Array<::String>]
+ # Optional. The arguments to pass to the driver. Do not include arguments,
+ # such as `--conf`, that can be set as job properties, since a collision
+ # might occur that causes an incorrect job submission.
+ # @!attribute [rw] jar_file_uris
+ # @return [::Array<::String>]
+ # Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
+ # Flink driver and tasks.
+ # @!attribute [rw] savepoint_uri
+ # @return [::String]
+ # Optional. HCFS URI of the savepoint, which contains the last saved progress
+ # for starting the current job.
+ # @!attribute [rw] properties
+ # @return [::Google::Protobuf::Map{::String => ::String}]
+ # Optional. A mapping of property names to values, used to configure Flink.
+ # Properties that conflict with values set by the Dataproc API might be
+ # overwritten. Can include properties set in
+ # `/etc/flink/conf/flink-defaults.conf` and classes in user code.
+ # @!attribute [rw] logging_config
+ # @return [::Google::Cloud::Dataproc::V1::LoggingConfig]
+ # Optional. The runtime log config for job execution.
+ class FlinkJob
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+
+ # @!attribute [rw] key
+ # @return [::String]
+ # @!attribute [rw] value
+ # @return [::String]
+ class PropertiesEntry
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+ end
+
   # Dataproc job config.
   # @!attribute [rw] cluster_name
   # @return [::String]
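
The new `FlinkJob` message plugs into the existing `JobController#submit_job` surface like the other job types. A sketch, assuming a regional endpoint; the project, region, cluster name, main class, and jar URI are placeholders:

    require "google/cloud/dataproc/v1"

    # Jobs are submitted against a regional service endpoint.
    client = Google::Cloud::Dataproc::V1::JobController::Client.new do |config|
      config.endpoint = "us-central1-dataproc.googleapis.com"
    end

    job = Google::Cloud::Dataproc::V1::Job.new(
      placement: Google::Cloud::Dataproc::V1::JobPlacement.new(
        cluster_name: "my-flink-cluster"
      ),
      flink_job: Google::Cloud::Dataproc::V1::FlinkJob.new(
        main_class: "com.example.WordCount",
        jar_file_uris: ["gs://my-bucket/jars/wordcount.jar"],
        properties: { "parallelism.default" => "2" }
      )
    )

    response = client.submit_job project_id: "my-project",
                                 region: "us-central1",
                                 job: job
    puts response.reference.job_id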
@@ -577,7 +655,7 @@ module Google
   # @!attribute [r] details
   # @return [::String]
   # Optional. Output only. Job state details, such as an error
- # description if the state is <code>ERROR</code>.
+ # description if the state is `ERROR`.
   # @!attribute [r] state_start_time
   # @return [::Google::Protobuf::Timestamp]
   # Output only. The time when this state was entered.
@@ -637,14 +715,14 @@ module Google
   # Applies to RUNNING state.
   SUBMITTED = 1

- # The Job has been received and is awaiting execution (it may be waiting
+ # The Job has been received and is awaiting execution (it might be waiting
   # for a condition to be met). See the "details" field for the reason for
   # the delay.
   #
   # Applies to RUNNING state.
   QUEUED = 2

- # The agent-reported status is out of date, which may be caused by a
+ # The agent-reported status is out of date, which can be caused by a
   # loss of communication between the agent and Dataproc. If the
   # agent does not send a timely update, the job will fail.
   #
@@ -741,34 +819,57 @@ module Google
   # @!attribute [rw] hadoop_job
   # @return [::Google::Cloud::Dataproc::V1::HadoopJob]
   # Optional. Job is a Hadoop job.
+ #
+ # Note: The following fields are mutually exclusive: `hadoop_job`, `spark_job`, `pyspark_job`, `hive_job`, `pig_job`, `spark_r_job`, `spark_sql_job`, `presto_job`, `trino_job`, `flink_job`. If a field in that set is populated, all other fields in the set will automatically be cleared.
   # @!attribute [rw] spark_job
   # @return [::Google::Cloud::Dataproc::V1::SparkJob]
   # Optional. Job is a Spark job.
+ #
+ # Note: The following fields are mutually exclusive: `spark_job`, `hadoop_job`, `pyspark_job`, `hive_job`, `pig_job`, `spark_r_job`, `spark_sql_job`, `presto_job`, `trino_job`, `flink_job`. If a field in that set is populated, all other fields in the set will automatically be cleared.
   # @!attribute [rw] pyspark_job
   # @return [::Google::Cloud::Dataproc::V1::PySparkJob]
   # Optional. Job is a PySpark job.
+ #
+ # Note: The following fields are mutually exclusive: `pyspark_job`, `hadoop_job`, `spark_job`, `hive_job`, `pig_job`, `spark_r_job`, `spark_sql_job`, `presto_job`, `trino_job`, `flink_job`. If a field in that set is populated, all other fields in the set will automatically be cleared.
   # @!attribute [rw] hive_job
   # @return [::Google::Cloud::Dataproc::V1::HiveJob]
   # Optional. Job is a Hive job.
+ #
+ # Note: The following fields are mutually exclusive: `hive_job`, `hadoop_job`, `spark_job`, `pyspark_job`, `pig_job`, `spark_r_job`, `spark_sql_job`, `presto_job`, `trino_job`, `flink_job`. If a field in that set is populated, all other fields in the set will automatically be cleared.
   # @!attribute [rw] pig_job
   # @return [::Google::Cloud::Dataproc::V1::PigJob]
   # Optional. Job is a Pig job.
+ #
+ # Note: The following fields are mutually exclusive: `pig_job`, `hadoop_job`, `spark_job`, `pyspark_job`, `hive_job`, `spark_r_job`, `spark_sql_job`, `presto_job`, `trino_job`, `flink_job`. If a field in that set is populated, all other fields in the set will automatically be cleared.
   # @!attribute [rw] spark_r_job
   # @return [::Google::Cloud::Dataproc::V1::SparkRJob]
   # Optional. Job is a SparkR job.
+ #
+ # Note: The following fields are mutually exclusive: `spark_r_job`, `hadoop_job`, `spark_job`, `pyspark_job`, `hive_job`, `pig_job`, `spark_sql_job`, `presto_job`, `trino_job`, `flink_job`. If a field in that set is populated, all other fields in the set will automatically be cleared.
   # @!attribute [rw] spark_sql_job
   # @return [::Google::Cloud::Dataproc::V1::SparkSqlJob]
   # Optional. Job is a SparkSql job.
+ #
+ # Note: The following fields are mutually exclusive: `spark_sql_job`, `hadoop_job`, `spark_job`, `pyspark_job`, `hive_job`, `pig_job`, `spark_r_job`, `presto_job`, `trino_job`, `flink_job`. If a field in that set is populated, all other fields in the set will automatically be cleared.
   # @!attribute [rw] presto_job
   # @return [::Google::Cloud::Dataproc::V1::PrestoJob]
   # Optional. Job is a Presto job.
+ #
+ # Note: The following fields are mutually exclusive: `presto_job`, `hadoop_job`, `spark_job`, `pyspark_job`, `hive_job`, `pig_job`, `spark_r_job`, `spark_sql_job`, `trino_job`, `flink_job`. If a field in that set is populated, all other fields in the set will automatically be cleared.
   # @!attribute [rw] trino_job
   # @return [::Google::Cloud::Dataproc::V1::TrinoJob]
   # Optional. Job is a Trino job.
+ #
+ # Note: The following fields are mutually exclusive: `trino_job`, `hadoop_job`, `spark_job`, `pyspark_job`, `hive_job`, `pig_job`, `spark_r_job`, `spark_sql_job`, `presto_job`, `flink_job`. If a field in that set is populated, all other fields in the set will automatically be cleared.
+ # @!attribute [rw] flink_job
+ # @return [::Google::Cloud::Dataproc::V1::FlinkJob]
+ # Optional. Job is a Flink job.
+ #
+ # Note: The following fields are mutually exclusive: `flink_job`, `hadoop_job`, `spark_job`, `pyspark_job`, `hive_job`, `pig_job`, `spark_r_job`, `spark_sql_job`, `presto_job`, `trino_job`. If a field in that set is populated, all other fields in the set will automatically be cleared.
   # @!attribute [r] status
   # @return [::Google::Cloud::Dataproc::V1::JobStatus]
   # Output only. The job status. Additional application-specific
- # status information may be contained in the <code>type_job</code>
+ # status information might be contained in the <code>type_job</code>
   # and <code>yarn_applications</code> fields.
   # @!attribute [r] status_history
   # @return [::Array<::Google::Cloud::Dataproc::V1::JobStatus>]
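
Because `flink_job` joins the `type_job` oneof referenced in the status documentation above, assigning it displaces whichever job type was previously set, exactly as the generated notes describe:

    require "google/cloud/dataproc/v1"

    job = Google::Cloud::Dataproc::V1::Job.new(
      spark_job: Google::Cloud::Dataproc::V1::SparkJob.new(main_class: "com.example.App")
    )

    # All job-type fields live in the `type_job` oneof, so this clears `spark_job`.
    job.flink_job = Google::Cloud::Dataproc::V1::FlinkJob.new(main_class: "com.example.App")
    job.spark_job # => nil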
@@ -778,7 +879,7 @@ module Google
   # Output only. The collection of YARN applications spun up by this job.
   #
   # **Beta** Feature: This report is available for testing purposes only. It
- # may be changed before final release.
+ # might be changed before final release.
   # @!attribute [r] driver_output_resource_uri
   # @return [::String]
   # Output only. A URI pointing to the location of the stdout of the job's
@@ -786,14 +887,14 @@ module Google
   # @!attribute [r] driver_control_files_uri
   # @return [::String]
   # Output only. If present, the location of miscellaneous control files
- # which may be used as part of job setup and handling. If not present,
- # control files may be placed in the same location as `driver_output_uri`.
+ # which can be used as part of job setup and handling. If not present,
+ # control files might be placed in the same location as `driver_output_uri`.
   # @!attribute [rw] labels
   # @return [::Google::Protobuf::Map{::String => ::String}]
   # Optional. The labels to associate with this job.
   # Label **keys** must contain 1 to 63 characters, and must conform to
   # [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
- # Label **values** may be empty, but, if present, must contain 1 to 63
+ # Label **values** can be empty, but, if present, must contain 1 to 63
   # characters, and must conform to [RFC
   # 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
   # associated with a job.
@@ -804,7 +905,7 @@ module Google
   # @return [::String]
   # Output only. A UUID that uniquely identifies a job within the project
   # over time. This is in contrast to a user-settable reference.job_id that
- # may be reused over time.
+ # might be reused over time.
   # @!attribute [r] done
   # @return [::Boolean]
   # Output only. Indicates whether the job is completed. If the value is
@@ -843,12 +944,12 @@ module Google
   # Job scheduling options.
   # @!attribute [rw] max_failures_per_hour
   # @return [::Integer]
- # Optional. Maximum number of times per hour a driver may be restarted as
+ # Optional. Maximum number of times per hour a driver can be restarted as
   # a result of driver exiting with non-zero code before job is
   # reported failed.
   #
- # A job may be reported as thrashing if the driver exits with a non-zero code
- # four times within a 10-minute window.
+ # A job might be reported as thrashing if the driver exits with a non-zero
+ # code four times within a 10-minute window.
   #
   # Maximum value is 10.
   #
@@ -857,7 +958,7 @@ module Google
   # (https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template).
   # @!attribute [rw] max_failures_total
   # @return [::Integer]
- # Optional. Maximum total number of times a driver may be restarted as a
+ # Optional. Maximum total number of times a driver can be restarted as a
   # result of the driver exiting with a non-zero code. After the maximum number
   # is reached, the job will be reported as failed.
   #
@@ -1031,6 +1132,12 @@ module Google
   # Optional. This token is included in the response if there are more results
   # to fetch. To fetch additional results, provide this value as the
   # `page_token` in a subsequent <code>ListJobsRequest</code>.
+ # @!attribute [r] unreachable
+ # @return [::Array<::String>]
+ # Output only. List of jobs with
+ # {::Google::Cloud::Dataproc::V1::EncryptionConfig#kms_key kms_key}-encrypted
+ # parameters that could not be decrypted. A response to a `jobs.get` request
+ # may indicate the reason for the decryption failure for a specific job.
   class ListJobsResponse
   include ::Google::Protobuf::MessageExts
   extend ::Google::Protobuf::MessageExts::ClassMethods
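
One plausible way to surface the new `unreachable` list while paging through jobs, reusing the JobController client from the earlier sketch. This assumes each page of the Gapic paged enumerable exposes its raw `ListJobsResponse` via `#response`:

    jobs = client.list_jobs project_id: "my-project", region: "us-central1"

    jobs.each_page do |page|
      page.each { |job| puts job.reference.job_id }
      # Jobs whose kms_key-encrypted parameters could not be decrypted:
      unreachable = page.response.unreachable
      warn "Unreachable jobs: #{unreachable.inspect}" unless unreachable.empty?
    end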
data/proto_docs/google/cloud/dataproc/v1/session_templates.rb

@@ -109,6 +109,13 @@ module Google
   # @!attribute [rw] jupyter_session
   # @return [::Google::Cloud::Dataproc::V1::JupyterConfig]
   # Optional. Jupyter session config.
+ #
+ # Note: The following fields are mutually exclusive: `jupyter_session`, `spark_connect_session`. If a field in that set is populated, all other fields in the set will automatically be cleared.
+ # @!attribute [rw] spark_connect_session
+ # @return [::Google::Cloud::Dataproc::V1::SparkConnectConfig]
+ # Optional. Spark Connect session config.
+ #
+ # Note: The following fields are mutually exclusive: `spark_connect_session`, `jupyter_session`. If a field in that set is populated, all other fields in the set will automatically be cleared.
   # @!attribute [r] creator
   # @return [::String]
   # Output only. The email address of the user who created the template.
data/proto_docs/google/cloud/dataproc/v1/sessions.rb

@@ -82,12 +82,13 @@ module Google
   # A filter is a logical expression constraining the values of various fields
   # in each session resource. Filters are case sensitive, and may contain
   # multiple clauses combined with logical operators (AND, OR).
- # Supported fields are `session_id`, `session_uuid`, `state`, and
- # `create_time`.
+ # Supported fields are `session_id`, `session_uuid`, `state`, `create_time`,
+ # and `labels`.
   #
   # Example: `state = ACTIVE and create_time < "2023-01-01T00:00:00Z"`
   # is a filter for sessions in an ACTIVE state that were created before
- # 2023-01-01.
+ # 2023-01-01. `state = ACTIVE and labels.environment=production` is a filter
+ # for sessions in an ACTIVE state that have a production environment label.
   #
   # See https://google.aip.dev/assets/misc/ebnf-filtering.txt for a detailed
   # description of the filter syntax and a list of supported comparators.
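
The expanded filter documentation maps directly onto `SessionController#list_sessions`. A small sketch; the parent path and label key/value are placeholders:

    require "google/cloud/dataproc/v1"

    sessions_client = Google::Cloud::Dataproc::V1::SessionController::Client.new

    # Filter on the newly documented `labels` field.
    sessions = sessions_client.list_sessions(
      parent: "projects/my-project/locations/us-central1",
      filter: 'state = ACTIVE and labels.environment=production'
    )
    sessions.each { |session| puts session.name }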
@@ -165,6 +166,13 @@ module Google
   # @!attribute [rw] jupyter_session
   # @return [::Google::Cloud::Dataproc::V1::JupyterConfig]
   # Optional. Jupyter session config.
+ #
+ # Note: The following fields are mutually exclusive: `jupyter_session`, `spark_connect_session`. If a field in that set is populated, all other fields in the set will automatically be cleared.
+ # @!attribute [rw] spark_connect_session
+ # @return [::Google::Cloud::Dataproc::V1::SparkConnectConfig]
+ # Optional. Spark Connect session config.
+ #
+ # Note: The following fields are mutually exclusive: `spark_connect_session`, `jupyter_session`. If a field in that set is populated, all other fields in the set will automatically be cleared.
   # @!attribute [r] runtime_info
   # @return [::Google::Cloud::Dataproc::V1::RuntimeInfo]
   # Output only. Runtime information about session execution.
@@ -289,6 +297,12 @@ module Google
   SCALA = 2
   end
   end
+
+ # Spark Connect configuration for an interactive session.
+ class SparkConnectConfig
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
   end
   end
   end
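
`SparkConnectConfig` carries no fields of its own; populating it (rather than `jupyter_session`) is what selects the session type. A creation sketch reusing `sessions_client` from above, with placeholder resource names:

    session = Google::Cloud::Dataproc::V1::Session.new(
      name: "projects/my-project/locations/us-central1/sessions/my-spark-connect",
      spark_connect_session: Google::Cloud::Dataproc::V1::SparkConnectConfig.new
    )

    # create_session returns a long-running operation.
    operation = sessions_client.create_session(
      parent: "projects/my-project/locations/us-central1",
      session: session,
      session_id: "my-spark-connect"
    )
    operation.wait_until_done!
    puts operation.results.name if operation.response?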
data/proto_docs/google/cloud/dataproc/v1/shared.rb

@@ -76,9 +76,13 @@ module Google
   # @!attribute [rw] network_uri
   # @return [::String]
   # Optional. Network URI to connect workload to.
+ #
+ # Note: The following fields are mutually exclusive: `network_uri`, `subnetwork_uri`. If a field in that set is populated, all other fields in the set will automatically be cleared.
   # @!attribute [rw] subnetwork_uri
   # @return [::String]
   # Optional. Subnetwork URI to connect workload to.
+ #
+ # Note: The following fields are mutually exclusive: `subnetwork_uri`, `network_uri`. If a field in that set is populated, all other fields in the set will automatically be cleared.
   # @!attribute [rw] network_tags
   # @return [::Array<::String>]
   # Optional. Tags used for network traffic control.
@@ -123,6 +127,12 @@ module Google
   # staging and temporary buckets.
   # **This field requires a Cloud Storage bucket name, not a `gs://...` URI to
   # a Cloud Storage bucket.**
+ # @!attribute [rw] authentication_config
+ # @return [::Google::Cloud::Dataproc::V1::AuthenticationConfig]
+ # Optional. Authentication configuration used to set the default identity for
+ # the workload execution. The config specifies the type of identity
+ # (service account or user) that will be used by workloads to access
+ # resources on the project(s).
   class ExecutionConfig
   include ::Google::Protobuf::MessageExts
   extend ::Google::Protobuf::MessageExts::ClassMethods
@@ -515,6 +525,33 @@ module Google
   end
   end

+ # Authentication configuration for a workload is used to set the default
+ # identity for the workload execution.
+ # The config specifies the type of identity (service account or user) that
+ # will be used by workloads to access resources on the project(s).
+ # @!attribute [rw] user_workload_authentication_type
+ # @return [::Google::Cloud::Dataproc::V1::AuthenticationConfig::AuthenticationType]
+ # Optional. Authentication type for the user workload running in containers.
+ class AuthenticationConfig
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+
+ # Authentication types for workload execution.
+ module AuthenticationType
+ # If AuthenticationType is unspecified then END_USER_CREDENTIALS is used
+ # for 3.0 and newer runtimes, and SERVICE_ACCOUNT is used for older
+ # runtimes.
+ AUTHENTICATION_TYPE_UNSPECIFIED = 0
+
+ # Use service account credentials for authenticating to other services.
+ SERVICE_ACCOUNT = 1
+
+ # Use OAuth credentials associated with the workload creator/user for
+ # authenticating to other services.
+ END_USER_CREDENTIALS = 2
+ end
+ end
+
   # Autotuning configuration of the workload.
   # @!attribute [rw] scenarios
   # @return [::Array<::Google::Cloud::Dataproc::V1::AutotuningConfig::Scenario>]
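
A short sketch of opting a workload into the new identity type: the enum accepts a Ruby symbol, and the config hangs off `ExecutionConfig` inside the workload's `EnvironmentConfig` (the subnetwork name is a placeholder):

    require "google/cloud/dataproc/v1"

    execution_config = Google::Cloud::Dataproc::V1::ExecutionConfig.new(
      subnetwork_uri: "my-subnet",
      authentication_config: Google::Cloud::Dataproc::V1::AuthenticationConfig.new(
        user_workload_authentication_type: :END_USER_CREDENTIALS
      )
    )

    environment = Google::Cloud::Dataproc::V1::EnvironmentConfig.new(
      execution_config: execution_config
    )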
@@ -563,10 +600,11 @@ module Google
   # Unspecified component. Specifying this will cause Cluster creation to fail.
   COMPONENT_UNSPECIFIED = 0

- # The Anaconda python distribution. The Anaconda component is not supported
- # in the Dataproc [2.0 image]
- # (/https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-release-2.0).
- # The 2.0 image is pre-installed with Miniconda.
+ # The Anaconda component is no longer supported or applicable to
+ # [supported Dataproc on Compute Engine image versions]
+ # (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-version-clusters#supported-dataproc-image-versions).
+ # It cannot be activated on clusters created with supported Dataproc on
+ # Compute Engine image versions.
   ANACONDA = 5

   # Docker