google-cloud-dataproc-v1 1.0.2 → 1.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. checksums.yaml +4 -4
  2. data/README.md +31 -21
  3. data/lib/google/cloud/dataproc/v1/autoscaling_policy_service/client.rb +36 -6
  4. data/lib/google/cloud/dataproc/v1/autoscaling_policy_service/rest/client.rb +36 -6
  5. data/lib/google/cloud/dataproc/v1/autoscaling_policy_service/rest/service_stub.rb +54 -32
  6. data/lib/google/cloud/dataproc/v1/batch_controller/client.rb +37 -5
  7. data/lib/google/cloud/dataproc/v1/batch_controller/operations.rb +19 -15
  8. data/lib/google/cloud/dataproc/v1/batch_controller/rest/client.rb +37 -5
  9. data/lib/google/cloud/dataproc/v1/batch_controller/rest/operations.rb +50 -38
  10. data/lib/google/cloud/dataproc/v1/batch_controller/rest/service_stub.rb +46 -26
  11. data/lib/google/cloud/dataproc/v1/batches_pb.rb +1 -1
  12. data/lib/google/cloud/dataproc/v1/cluster_controller/client.rb +53 -17
  13. data/lib/google/cloud/dataproc/v1/cluster_controller/operations.rb +19 -15
  14. data/lib/google/cloud/dataproc/v1/cluster_controller/paths.rb +21 -0
  15. data/lib/google/cloud/dataproc/v1/cluster_controller/rest/client.rb +53 -17
  16. data/lib/google/cloud/dataproc/v1/cluster_controller/rest/operations.rb +50 -38
  17. data/lib/google/cloud/dataproc/v1/cluster_controller/rest/service_stub.rb +78 -50
  18. data/lib/google/cloud/dataproc/v1/clusters_pb.rb +3 -1
  19. data/lib/google/cloud/dataproc/v1/job_controller/client.rb +37 -8
  20. data/lib/google/cloud/dataproc/v1/job_controller/operations.rb +19 -15
  21. data/lib/google/cloud/dataproc/v1/job_controller/rest/client.rb +37 -8
  22. data/lib/google/cloud/dataproc/v1/job_controller/rest/operations.rb +50 -38
  23. data/lib/google/cloud/dataproc/v1/job_controller/rest/service_stub.rb +70 -44
  24. data/lib/google/cloud/dataproc/v1/jobs_pb.rb +2 -1
  25. data/lib/google/cloud/dataproc/v1/node_group_controller/client.rb +37 -4
  26. data/lib/google/cloud/dataproc/v1/node_group_controller/operations.rb +19 -15
  27. data/lib/google/cloud/dataproc/v1/node_group_controller/rest/client.rb +37 -4
  28. data/lib/google/cloud/dataproc/v1/node_group_controller/rest/operations.rb +50 -38
  29. data/lib/google/cloud/dataproc/v1/node_group_controller/rest/service_stub.rb +38 -20
  30. data/lib/google/cloud/dataproc/v1/session_controller/client.rb +43 -9
  31. data/lib/google/cloud/dataproc/v1/session_controller/operations.rb +19 -15
  32. data/lib/google/cloud/dataproc/v1/session_controller/rest/client.rb +43 -9
  33. data/lib/google/cloud/dataproc/v1/session_controller/rest/operations.rb +50 -38
  34. data/lib/google/cloud/dataproc/v1/session_controller/rest/service_stub.rb +54 -32
  35. data/lib/google/cloud/dataproc/v1/session_template_controller/client.rb +36 -6
  36. data/lib/google/cloud/dataproc/v1/session_template_controller/rest/client.rb +36 -6
  37. data/lib/google/cloud/dataproc/v1/session_template_controller/rest/service_stub.rb +54 -32
  38. data/lib/google/cloud/dataproc/v1/session_templates_pb.rb +1 -1
  39. data/lib/google/cloud/dataproc/v1/sessions_pb.rb +2 -1
  40. data/lib/google/cloud/dataproc/v1/shared_pb.rb +1 -1
  41. data/lib/google/cloud/dataproc/v1/version.rb +1 -1
  42. data/lib/google/cloud/dataproc/v1/workflow_template_service/client.rb +38 -8
  43. data/lib/google/cloud/dataproc/v1/workflow_template_service/operations.rb +19 -15
  44. data/lib/google/cloud/dataproc/v1/workflow_template_service/paths.rb +21 -0
  45. data/lib/google/cloud/dataproc/v1/workflow_template_service/rest/client.rb +38 -8
  46. data/lib/google/cloud/dataproc/v1/workflow_template_service/rest/operations.rb +50 -38
  47. data/lib/google/cloud/dataproc/v1/workflow_template_service/rest/service_stub.rb +70 -44
  48. data/lib/google/cloud/dataproc/v1/workflow_templates_pb.rb +2 -1
  49. data/proto_docs/google/api/client.rb +47 -0
  50. data/proto_docs/google/cloud/dataproc/v1/batches.rb +17 -0
  51. data/proto_docs/google/cloud/dataproc/v1/clusters.rb +129 -23
  52. data/proto_docs/google/cloud/dataproc/v1/jobs.rb +130 -23
  53. data/proto_docs/google/cloud/dataproc/v1/session_templates.rb +7 -0
  54. data/proto_docs/google/cloud/dataproc/v1/sessions.rb +17 -3
  55. data/proto_docs/google/cloud/dataproc/v1/shared.rb +9 -4
  56. data/proto_docs/google/cloud/dataproc/v1/workflow_templates.rb +79 -0
  57. data/proto_docs/google/longrunning/operations.rb +23 -14
  58. metadata +6 -9
data/proto_docs/google/cloud/dataproc/v1/jobs.rb

@@ -24,7 +24,7 @@ module Google
  # The runtime logging config of the job.
  # @!attribute [rw] driver_log_levels
  # @return [::Google::Protobuf::Map{::String => ::Google::Cloud::Dataproc::V1::LoggingConfig::Level}]
- # The per-package log levels for the driver. This may include
+ # The per-package log levels for the driver. This can include
  # "root" package name to configure rootLogger.
  # Examples:
  # - 'com.google = FATAL'
@@ -88,15 +88,19 @@ module Google
  # 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar'
  # 'hdfs:/tmp/test-samples/custom-wordcount.jar'
  # 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
+ #
+ # Note: The following fields are mutually exclusive: `main_jar_file_uri`, `main_class`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] main_class
  # @return [::String]
  # The name of the driver's main class. The jar file containing the class
  # must be in the default CLASSPATH or specified in `jar_file_uris`.
+ #
+ # Note: The following fields are mutually exclusive: `main_class`, `main_jar_file_uri`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] args
  # @return [::Array<::String>]
  # Optional. The arguments to pass to the driver. Do not
  # include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as
- # job properties, since a collision may occur that causes an incorrect job
+ # job properties, since a collision might occur that causes an incorrect job
  # submission.
  # @!attribute [rw] jar_file_uris
  # @return [::Array<::String>]
@@ -115,7 +119,7 @@ module Google
  # @!attribute [rw] properties
  # @return [::Google::Protobuf::Map{::String => ::String}]
  # Optional. A mapping of property names to values, used to configure Hadoop.
- # Properties that conflict with values set by the Dataproc API may be
+ # Properties that conflict with values set by the Dataproc API might be
  # overwritten. Can include properties set in `/etc/hadoop/conf/*-site` and
  # classes in user code.
  # @!attribute [rw] logging_config
@@ -140,10 +144,15 @@ module Google
  # @!attribute [rw] main_jar_file_uri
  # @return [::String]
  # The HCFS URI of the jar file that contains the main class.
+ #
+ # Note: The following fields are mutually exclusive: `main_jar_file_uri`, `main_class`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] main_class
  # @return [::String]
  # The name of the driver's main class. The jar file that contains the class
- # must be in the default CLASSPATH or specified in `jar_file_uris`.
+ # must be in the default CLASSPATH or specified in
+ # SparkJob.jar_file_uris.
+ #
+ # Note: The following fields are mutually exclusive: `main_class`, `main_jar_file_uri`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] args
  # @return [::Array<::String>]
  # Optional. The arguments to pass to the driver. Do not include arguments,
@@ -165,7 +174,7 @@ module Google
  # @!attribute [rw] properties
  # @return [::Google::Protobuf::Map{::String => ::String}]
  # Optional. A mapping of property names to values, used to configure Spark.
- # Properties that conflict with values set by the Dataproc API may be
+ # Properties that conflict with values set by the Dataproc API might be
  # overwritten. Can include properties set in
  # /etc/spark/conf/spark-defaults.conf and classes in user code.
  # @!attribute [rw] logging_config
@@ -218,7 +227,7 @@ module Google
  # @!attribute [rw] properties
  # @return [::Google::Protobuf::Map{::String => ::String}]
  # Optional. A mapping of property names to values, used to configure PySpark.
- # Properties that conflict with values set by the Dataproc API may be
+ # Properties that conflict with values set by the Dataproc API might be
  # overwritten. Can include properties set in
  # /etc/spark/conf/spark-defaults.conf and classes in user code.
  # @!attribute [rw] logging_config
@@ -265,9 +274,13 @@ module Google
  # @!attribute [rw] query_file_uri
  # @return [::String]
  # The HCFS URI of the script that contains Hive queries.
+ #
+ # Note: The following fields are mutually exclusive: `query_file_uri`, `query_list`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] query_list
  # @return [::Google::Cloud::Dataproc::V1::QueryList]
  # A list of queries.
+ #
+ # Note: The following fields are mutually exclusive: `query_list`, `query_file_uri`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] continue_on_failure
  # @return [::Boolean]
  # Optional. Whether to continue executing queries if a query fails.
@@ -280,7 +293,7 @@ module Google
  # @!attribute [rw] properties
  # @return [::Google::Protobuf::Map{::String => ::String}]
  # Optional. A mapping of property names and values, used to configure Hive.
- # Properties that conflict with values set by the Dataproc API may be
+ # Properties that conflict with values set by the Dataproc API might be
  # overwritten. Can include properties set in `/etc/hadoop/conf/*-site.xml`,
  # /etc/hive/conf/hive-site.xml, and classes in user code.
  # @!attribute [rw] jar_file_uris
@@ -316,9 +329,13 @@ module Google
  # @!attribute [rw] query_file_uri
  # @return [::String]
  # The HCFS URI of the script that contains SQL queries.
+ #
+ # Note: The following fields are mutually exclusive: `query_file_uri`, `query_list`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] query_list
  # @return [::Google::Cloud::Dataproc::V1::QueryList]
  # A list of queries.
+ #
+ # Note: The following fields are mutually exclusive: `query_list`, `query_file_uri`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] script_variables
  # @return [::Google::Protobuf::Map{::String => ::String}]
  # Optional. Mapping of query variable names to values (equivalent to the
@@ -327,7 +344,7 @@ module Google
  # @return [::Google::Protobuf::Map{::String => ::String}]
  # Optional. A mapping of property names to values, used to configure
  # Spark SQL's SparkConf. Properties that conflict with values set by the
- # Dataproc API may be overwritten.
+ # Dataproc API might be overwritten.
  # @!attribute [rw] jar_file_uris
  # @return [::Array<::String>]
  # Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
@@ -362,9 +379,13 @@ module Google
  # @!attribute [rw] query_file_uri
  # @return [::String]
  # The HCFS URI of the script that contains the Pig queries.
+ #
+ # Note: The following fields are mutually exclusive: `query_file_uri`, `query_list`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] query_list
  # @return [::Google::Cloud::Dataproc::V1::QueryList]
  # A list of queries.
+ #
+ # Note: The following fields are mutually exclusive: `query_list`, `query_file_uri`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] continue_on_failure
  # @return [::Boolean]
  # Optional. Whether to continue executing queries if a query fails.
@@ -377,7 +398,7 @@ module Google
  # @!attribute [rw] properties
  # @return [::Google::Protobuf::Map{::String => ::String}]
  # Optional. A mapping of property names to values, used to configure Pig.
- # Properties that conflict with values set by the Dataproc API may be
+ # Properties that conflict with values set by the Dataproc API might be
  # overwritten. Can include properties set in `/etc/hadoop/conf/*-site.xml`,
  # /etc/pig/conf/pig.properties, and classes in user code.
  # @!attribute [rw] jar_file_uris
@@ -434,7 +455,7 @@ module Google
  # @!attribute [rw] properties
  # @return [::Google::Protobuf::Map{::String => ::String}]
  # Optional. A mapping of property names to values, used to configure SparkR.
- # Properties that conflict with values set by the Dataproc API may be
+ # Properties that conflict with values set by the Dataproc API might be
  # overwritten. Can include properties set in
  # /etc/spark/conf/spark-defaults.conf and classes in user code.
  # @!attribute [rw] logging_config
@@ -462,9 +483,13 @@ module Google
  # @!attribute [rw] query_file_uri
  # @return [::String]
  # The HCFS URI of the script that contains SQL queries.
+ #
+ # Note: The following fields are mutually exclusive: `query_file_uri`, `query_list`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] query_list
  # @return [::Google::Cloud::Dataproc::V1::QueryList]
  # A list of queries.
+ #
+ # Note: The following fields are mutually exclusive: `query_list`, `query_file_uri`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] continue_on_failure
  # @return [::Boolean]
  # Optional. Whether to continue executing queries if a query fails.
@@ -507,9 +532,13 @@ module Google
  # @!attribute [rw] query_file_uri
  # @return [::String]
  # The HCFS URI of the script that contains SQL queries.
+ #
+ # Note: The following fields are mutually exclusive: `query_file_uri`, `query_list`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] query_list
  # @return [::Google::Cloud::Dataproc::V1::QueryList]
  # A list of queries.
+ #
+ # Note: The following fields are mutually exclusive: `query_list`, `query_file_uri`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] continue_on_failure
  # @return [::Boolean]
  # Optional. Whether to continue executing queries if a query fails.
@@ -544,6 +573,55 @@ module Google
  end
  end

+ # A Dataproc job for running Apache Flink applications on YARN.
+ # @!attribute [rw] main_jar_file_uri
+ # @return [::String]
+ # The HCFS URI of the jar file that contains the main class.
+ #
+ # Note: The following fields are mutually exclusive: `main_jar_file_uri`, `main_class`. If a field in that set is populated, all other fields in the set will automatically be cleared.
+ # @!attribute [rw] main_class
+ # @return [::String]
+ # The name of the driver's main class. The jar file that contains the class
+ # must be in the default CLASSPATH or specified in
+ # {::Google::Cloud::Dataproc::V1::FlinkJob#jar_file_uris jarFileUris}.
+ #
+ # Note: The following fields are mutually exclusive: `main_class`, `main_jar_file_uri`. If a field in that set is populated, all other fields in the set will automatically be cleared.
+ # @!attribute [rw] args
+ # @return [::Array<::String>]
+ # Optional. The arguments to pass to the driver. Do not include arguments,
+ # such as `--conf`, that can be set as job properties, since a collision
+ # might occur that causes an incorrect job submission.
+ # @!attribute [rw] jar_file_uris
+ # @return [::Array<::String>]
+ # Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
+ # Flink driver and tasks.
+ # @!attribute [rw] savepoint_uri
+ # @return [::String]
+ # Optional. HCFS URI of the savepoint, which contains the last saved progress
+ # for starting the current job.
+ # @!attribute [rw] properties
+ # @return [::Google::Protobuf::Map{::String => ::String}]
+ # Optional. A mapping of property names to values, used to configure Flink.
+ # Properties that conflict with values set by the Dataproc API might be
+ # overwritten. Can include properties set in
+ # `/etc/flink/conf/flink-defaults.conf` and classes in user code.
+ # @!attribute [rw] logging_config
+ # @return [::Google::Cloud::Dataproc::V1::LoggingConfig]
+ # Optional. The runtime log config for job execution.
+ class FlinkJob
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+
+ # @!attribute [rw] key
+ # @return [::String]
+ # @!attribute [rw] value
+ # @return [::String]
+ class PropertiesEntry
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+ end
+
  # Dataproc job config.
  # @!attribute [rw] cluster_name
  # @return [::String]
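
The new `FlinkJob` message plugs into the `Job` type union alongside the existing job types (see the `flink_job` field added below). A minimal sketch of submitting one through the `JobController` client; the project, region, cluster name, jar URI, and property values here are placeholders, not part of this diff:

```ruby
require "google/cloud/dataproc/v1"

# Minimal sketch; all resource names and values below are hypothetical.
client = Google::Cloud::Dataproc::V1::JobController::Client.new

job = Google::Cloud::Dataproc::V1::Job.new(
  placement: Google::Cloud::Dataproc::V1::JobPlacement.new(cluster_name: "my-cluster"),
  flink_job: Google::Cloud::Dataproc::V1::FlinkJob.new(
    main_class:    "org.example.WordCount",
    jar_file_uris: ["gs://my-bucket/wordcount.jar"],
    # Properties that conflict with values set by the Dataproc API might be overwritten.
    properties:    { "parallelism.default" => "2" }
  )
)

response = client.submit_job project_id: "my-project", region: "us-central1", job: job
puts response.reference.job_id
```
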
@@ -577,7 +655,7 @@ module Google
  # @!attribute [r] details
  # @return [::String]
  # Optional. Output only. Job state details, such as an error
- # description if the state is <code>ERROR</code>.
+ # description if the state is `ERROR`.
  # @!attribute [r] state_start_time
  # @return [::Google::Protobuf::Timestamp]
  # Output only. The time when this state was entered.
@@ -637,14 +715,14 @@ module Google
  # Applies to RUNNING state.
  SUBMITTED = 1

- # The Job has been received and is awaiting execution (it may be waiting
+ # The Job has been received and is awaiting execution (it might be waiting
  # for a condition to be met). See the "details" field for the reason for
  # the delay.
  #
  # Applies to RUNNING state.
  QUEUED = 2

- # The agent-reported status is out of date, which may be caused by a
+ # The agent-reported status is out of date, which can be caused by a
  # loss of communication between the agent and Dataproc. If the
  # agent does not send a timely update, the job will fail.
  #
@@ -741,34 +819,57 @@ module Google
  # @!attribute [rw] hadoop_job
  # @return [::Google::Cloud::Dataproc::V1::HadoopJob]
  # Optional. Job is a Hadoop job.
+ #
+ # Note: The following fields are mutually exclusive: `hadoop_job`, `spark_job`, `pyspark_job`, `hive_job`, `pig_job`, `spark_r_job`, `spark_sql_job`, `presto_job`, `trino_job`, `flink_job`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] spark_job
  # @return [::Google::Cloud::Dataproc::V1::SparkJob]
  # Optional. Job is a Spark job.
+ #
+ # Note: The following fields are mutually exclusive: `spark_job`, `hadoop_job`, `pyspark_job`, `hive_job`, `pig_job`, `spark_r_job`, `spark_sql_job`, `presto_job`, `trino_job`, `flink_job`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] pyspark_job
  # @return [::Google::Cloud::Dataproc::V1::PySparkJob]
  # Optional. Job is a PySpark job.
+ #
+ # Note: The following fields are mutually exclusive: `pyspark_job`, `hadoop_job`, `spark_job`, `hive_job`, `pig_job`, `spark_r_job`, `spark_sql_job`, `presto_job`, `trino_job`, `flink_job`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] hive_job
  # @return [::Google::Cloud::Dataproc::V1::HiveJob]
  # Optional. Job is a Hive job.
+ #
+ # Note: The following fields are mutually exclusive: `hive_job`, `hadoop_job`, `spark_job`, `pyspark_job`, `pig_job`, `spark_r_job`, `spark_sql_job`, `presto_job`, `trino_job`, `flink_job`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] pig_job
  # @return [::Google::Cloud::Dataproc::V1::PigJob]
  # Optional. Job is a Pig job.
+ #
+ # Note: The following fields are mutually exclusive: `pig_job`, `hadoop_job`, `spark_job`, `pyspark_job`, `hive_job`, `spark_r_job`, `spark_sql_job`, `presto_job`, `trino_job`, `flink_job`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] spark_r_job
  # @return [::Google::Cloud::Dataproc::V1::SparkRJob]
  # Optional. Job is a SparkR job.
+ #
+ # Note: The following fields are mutually exclusive: `spark_r_job`, `hadoop_job`, `spark_job`, `pyspark_job`, `hive_job`, `pig_job`, `spark_sql_job`, `presto_job`, `trino_job`, `flink_job`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] spark_sql_job
  # @return [::Google::Cloud::Dataproc::V1::SparkSqlJob]
  # Optional. Job is a SparkSql job.
+ #
+ # Note: The following fields are mutually exclusive: `spark_sql_job`, `hadoop_job`, `spark_job`, `pyspark_job`, `hive_job`, `pig_job`, `spark_r_job`, `presto_job`, `trino_job`, `flink_job`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] presto_job
  # @return [::Google::Cloud::Dataproc::V1::PrestoJob]
  # Optional. Job is a Presto job.
+ #
+ # Note: The following fields are mutually exclusive: `presto_job`, `hadoop_job`, `spark_job`, `pyspark_job`, `hive_job`, `pig_job`, `spark_r_job`, `spark_sql_job`, `trino_job`, `flink_job`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] trino_job
  # @return [::Google::Cloud::Dataproc::V1::TrinoJob]
  # Optional. Job is a Trino job.
+ #
+ # Note: The following fields are mutually exclusive: `trino_job`, `hadoop_job`, `spark_job`, `pyspark_job`, `hive_job`, `pig_job`, `spark_r_job`, `spark_sql_job`, `presto_job`, `flink_job`. If a field in that set is populated, all other fields in the set will automatically be cleared.
+ # @!attribute [rw] flink_job
+ # @return [::Google::Cloud::Dataproc::V1::FlinkJob]
+ # Optional. Job is a Flink job.
+ #
+ # Note: The following fields are mutually exclusive: `flink_job`, `hadoop_job`, `spark_job`, `pyspark_job`, `hive_job`, `pig_job`, `spark_r_job`, `spark_sql_job`, `presto_job`, `trino_job`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [r] status
  # @return [::Google::Cloud::Dataproc::V1::JobStatus]
  # Output only. The job status. Additional application-specific
- # status information may be contained in the <code>type_job</code>
+ # status information might be contained in the <code>type_job</code>
  # and <code>yarn_applications</code> fields.
  # @!attribute [r] status_history
  # @return [::Array<::Google::Cloud::Dataproc::V1::JobStatus>]
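
The "mutually exclusive" notes added throughout this diff describe ordinary protobuf oneof behavior, which the generated Ruby classes already enforce: assigning one member of the job-type set clears whichever member was previously populated. A quick illustration with hypothetical values:

```ruby
job = Google::Cloud::Dataproc::V1::Job.new
job.hadoop_job = Google::Cloud::Dataproc::V1::HadoopJob.new(main_class: "org.example.MapReduce")
job.spark_job  = Google::Cloud::Dataproc::V1::SparkJob.new(main_class: "org.example.Spark")

job.hadoop_job # => nil (cleared automatically when spark_job was populated)
job.spark_job  # => the SparkJob assigned above
```
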
@@ -778,7 +879,7 @@ module Google
  # Output only. The collection of YARN applications spun up by this job.
  #
  # **Beta** Feature: This report is available for testing purposes only. It
- # may be changed before final release.
+ # might be changed before final release.
  # @!attribute [r] driver_output_resource_uri
  # @return [::String]
  # Output only. A URI pointing to the location of the stdout of the job's
@@ -786,14 +887,14 @@ module Google
  # @!attribute [r] driver_control_files_uri
  # @return [::String]
  # Output only. If present, the location of miscellaneous control files
- # which may be used as part of job setup and handling. If not present,
- # control files may be placed in the same location as `driver_output_uri`.
+ # which can be used as part of job setup and handling. If not present,
+ # control files might be placed in the same location as `driver_output_uri`.
  # @!attribute [rw] labels
  # @return [::Google::Protobuf::Map{::String => ::String}]
  # Optional. The labels to associate with this job.
  # Label **keys** must contain 1 to 63 characters, and must conform to
  # [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
- # Label **values** may be empty, but, if present, must contain 1 to 63
+ # Label **values** can be empty, but, if present, must contain 1 to 63
  # characters, and must conform to [RFC
  # 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
  # associated with a job.
@@ -804,7 +905,7 @@ module Google
  # @return [::String]
  # Output only. A UUID that uniquely identifies a job within the project
  # over time. This is in contrast to a user-settable reference.job_id that
- # may be reused over time.
+ # might be reused over time.
  # @!attribute [r] done
  # @return [::Boolean]
  # Output only. Indicates whether the job is completed. If the value is
@@ -843,12 +944,12 @@ module Google
  # Job scheduling options.
  # @!attribute [rw] max_failures_per_hour
  # @return [::Integer]
- # Optional. Maximum number of times per hour a driver may be restarted as
+ # Optional. Maximum number of times per hour a driver can be restarted as
  # a result of driver exiting with non-zero code before job is
  # reported failed.
  #
- # A job may be reported as thrashing if the driver exits with a non-zero code
- # four times within a 10-minute window.
+ # A job might be reported as thrashing if the driver exits with a non-zero
+ # code four times within a 10-minute window.
  #
  # Maximum value is 10.
  #
@@ -857,7 +958,7 @@ module Google
  # (https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template).
  # @!attribute [rw] max_failures_total
  # @return [::Integer]
- # Optional. Maximum total number of times a driver may be restarted as a
+ # Optional. Maximum total number of times a driver can be restarted as a
  # result of the driver exiting with a non-zero code. After the maximum number
  # is reached, the job will be reported as failed.
  #
@@ -1031,6 +1132,12 @@ module Google
  # Optional. This token is included in the response if there are more results
  # to fetch. To fetch additional results, provide this value as the
  # `page_token` in a subsequent <code>ListJobsRequest</code>.
+ # @!attribute [r] unreachable
+ # @return [::Array<::String>]
+ # Output only. List of jobs with
+ # {::Google::Cloud::Dataproc::V1::EncryptionConfig#kms_key kms_key}-encrypted
+ # parameters that could not be decrypted. A response to a `jobs.get` request
+ # may indicate the reason for the decryption failure for a specific job.
  class ListJobsResponse
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
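
For callers, the new `unreachable` field rides on the raw `ListJobsResponse`. One hedged way to read it, assuming the paged enumerable returned by `list_jobs` exposes the current underlying response via `#response` (gapic-common behavior); the project and region are placeholders:

```ruby
results = client.list_jobs project_id: "my-project", region: "us-central1"

# The current page's underlying ListJobsResponse.
results.response.unreachable.each do |job|
  warn "Job with kms_key-encrypted parameters that could not be decrypted: #{job}"
end
```
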
data/proto_docs/google/cloud/dataproc/v1/session_templates.rb

@@ -109,6 +109,13 @@ module Google
  # @!attribute [rw] jupyter_session
  # @return [::Google::Cloud::Dataproc::V1::JupyterConfig]
  # Optional. Jupyter session config.
+ #
+ # Note: The following fields are mutually exclusive: `jupyter_session`, `spark_connect_session`. If a field in that set is populated, all other fields in the set will automatically be cleared.
+ # @!attribute [rw] spark_connect_session
+ # @return [::Google::Cloud::Dataproc::V1::SparkConnectConfig]
+ # Optional. Spark Connect session config.
+ #
+ # Note: The following fields are mutually exclusive: `spark_connect_session`, `jupyter_session`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [r] creator
  # @return [::String]
  # Output only. The email address of the user who created the template.

data/proto_docs/google/cloud/dataproc/v1/sessions.rb

@@ -82,12 +82,13 @@ module Google
  # A filter is a logical expression constraining the values of various fields
  # in each session resource. Filters are case sensitive, and may contain
  # multiple clauses combined with logical operators (AND, OR).
- # Supported fields are `session_id`, `session_uuid`, `state`, and
- # `create_time`.
+ # Supported fields are `session_id`, `session_uuid`, `state`, `create_time`,
+ # and `labels`.
  #
  # Example: `state = ACTIVE and create_time < "2023-01-01T00:00:00Z"`
  # is a filter for sessions in an ACTIVE state that were created before
- # 2023-01-01.
+ # 2023-01-01. `state = ACTIVE and labels.environment=production` is a filter
+ # for sessions in an ACTIVE state that have a production environment label.
  #
  # See https://google.aip.dev/assets/misc/ebnf-filtering.txt for a detailed
  # description of the filter syntax and a list of supported comparators.
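
With `labels` now a supported filter field, a `SessionController` call might look like this sketch; the parent resource and label are placeholders, and the filter string is the example from the doc above:

```ruby
client = Google::Cloud::Dataproc::V1::SessionController::Client.new

sessions = client.list_sessions(
  parent: "projects/my-project/locations/us-central1",
  filter: 'state = ACTIVE and labels.environment=production'
)
sessions.each { |session| puts session.name }
```
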
@@ -165,6 +166,13 @@ module Google
  # @!attribute [rw] jupyter_session
  # @return [::Google::Cloud::Dataproc::V1::JupyterConfig]
  # Optional. Jupyter session config.
+ #
+ # Note: The following fields are mutually exclusive: `jupyter_session`, `spark_connect_session`. If a field in that set is populated, all other fields in the set will automatically be cleared.
+ # @!attribute [rw] spark_connect_session
+ # @return [::Google::Cloud::Dataproc::V1::SparkConnectConfig]
+ # Optional. Spark Connect session config.
+ #
+ # Note: The following fields are mutually exclusive: `spark_connect_session`, `jupyter_session`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [r] runtime_info
  # @return [::Google::Cloud::Dataproc::V1::RuntimeInfo]
  # Output only. Runtime information about session execution.
@@ -289,6 +297,12 @@ module Google
  SCALA = 2
  end
  end
+
+ # Spark Connect configuration for an interactive session.
+ class SparkConnectConfig
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
  end
  end
  end
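
A sketch of creating a session backed by the new `SparkConnectConfig`; the resource names are placeholders, and per the oneof notes above, populating `spark_connect_session` clears `jupyter_session`:

```ruby
client = Google::Cloud::Dataproc::V1::SessionController::Client.new

session = Google::Cloud::Dataproc::V1::Session.new(
  name: "projects/my-project/locations/us-central1/sessions/my-session",
  spark_connect_session: Google::Cloud::Dataproc::V1::SparkConnectConfig.new
)

# create_session returns a long-running operation.
operation = client.create_session(
  parent: "projects/my-project/locations/us-central1",
  session: session,
  session_id: "my-session"
)
operation.wait_until_done!
```
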
data/proto_docs/google/cloud/dataproc/v1/shared.rb

@@ -76,9 +76,13 @@ module Google
  # @!attribute [rw] network_uri
  # @return [::String]
  # Optional. Network URI to connect workload to.
+ #
+ # Note: The following fields are mutually exclusive: `network_uri`, `subnetwork_uri`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] subnetwork_uri
  # @return [::String]
  # Optional. Subnetwork URI to connect workload to.
+ #
+ # Note: The following fields are mutually exclusive: `subnetwork_uri`, `network_uri`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] network_tags
  # @return [::Array<::String>]
  # Optional. Tags used for network traffic control.
@@ -563,10 +567,11 @@ module Google
  # Unspecified component. Specifying this will cause Cluster creation to fail.
  COMPONENT_UNSPECIFIED = 0

- # The Anaconda python distribution. The Anaconda component is not supported
- # in the Dataproc [2.0 image]
- # (/https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-release-2.0).
- # The 2.0 image is pre-installed with Miniconda.
+ # The Anaconda component is no longer supported or applicable to
+ # [supported Dataproc on Compute Engine image versions]
+ # (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-version-clusters#supported-dataproc-image-versions).
+ # It cannot be activated on clusters created with supported Dataproc on
+ # Compute Engine image versions.
  ANACONDA = 5

  # Docker
data/proto_docs/google/cloud/dataproc/v1/workflow_templates.rb

@@ -90,10 +90,50 @@ module Google
  # [managed
  # cluster](/dataproc/docs/concepts/workflows/using-workflows#configuring_or_selecting_a_cluster),
  # the cluster is deleted.
+ # @!attribute [rw] encryption_config
+ # @return [::Google::Cloud::Dataproc::V1::WorkflowTemplate::EncryptionConfig]
+ # Optional. Encryption settings for encrypting workflow template job
+ # arguments.
  class WorkflowTemplate
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

+ # Encryption settings for encrypting workflow template job arguments.
+ # @!attribute [rw] kms_key
+ # @return [::String]
+ # Optional. The Cloud KMS key name to use for encrypting
+ # workflow template job arguments.
+ #
+ # When this key is provided, the following workflow template
+ # [job arguments]
+ # (https://cloud.google.com/dataproc/docs/concepts/workflows/use-workflows#adding_jobs_to_a_template),
+ # if present, are
+ # [CMEK
+ # encrypted](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/customer-managed-encryption#use_cmek_with_workflow_template_data):
+ #
+ # * [FlinkJob
+ # args](https://cloud.google.com/dataproc/docs/reference/rest/v1/FlinkJob)
+ # * [HadoopJob
+ # args](https://cloud.google.com/dataproc/docs/reference/rest/v1/HadoopJob)
+ # * [SparkJob
+ # args](https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkJob)
+ # * [SparkRJob
+ # args](https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkRJob)
+ # * [PySparkJob
+ # args](https://cloud.google.com/dataproc/docs/reference/rest/v1/PySparkJob)
+ # * [SparkSqlJob](https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkSqlJob)
+ # scriptVariables and queryList.queries
+ # * [HiveJob](https://cloud.google.com/dataproc/docs/reference/rest/v1/HiveJob)
+ # scriptVariables and queryList.queries
+ # * [PigJob](https://cloud.google.com/dataproc/docs/reference/rest/v1/PigJob)
+ # scriptVariables and queryList.queries
+ # * [PrestoJob](https://cloud.google.com/dataproc/docs/reference/rest/v1/PrestoJob)
+ # scriptVariables and queryList.queries
+ class EncryptionConfig
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+
  # @!attribute [rw] key
  # @return [::String]
  # @!attribute [rw] value
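
A sketch of attaching the new encryption settings when creating a template; the KMS key name, template id, and parent are placeholders, and the rest of the template (placement, jobs) is elided:

```ruby
client = Google::Cloud::Dataproc::V1::WorkflowTemplateService::Client.new

template = Google::Cloud::Dataproc::V1::WorkflowTemplate.new(
  id: "my-template",
  encryption_config: Google::Cloud::Dataproc::V1::WorkflowTemplate::EncryptionConfig.new(
    kms_key: "projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key"
  )
  # placement and jobs omitted for brevity
)

client.create_workflow_template parent: "projects/my-project/regions/us-central1", template: template
```
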
@@ -110,12 +150,16 @@ module Google
  # @!attribute [rw] managed_cluster
  # @return [::Google::Cloud::Dataproc::V1::ManagedCluster]
  # A cluster that is managed by the workflow.
+ #
+ # Note: The following fields are mutually exclusive: `managed_cluster`, `cluster_selector`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] cluster_selector
  # @return [::Google::Cloud::Dataproc::V1::ClusterSelector]
  # Optional. A selector that chooses target cluster for jobs based
  # on metadata.
  #
  # The selector is evaluated at the time each job is submitted.
+ #
+ # Note: The following fields are mutually exclusive: `cluster_selector`, `managed_cluster`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  class WorkflowTemplatePlacement
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
@@ -202,27 +246,53 @@ module Google
  # @!attribute [rw] hadoop_job
  # @return [::Google::Cloud::Dataproc::V1::HadoopJob]
  # Optional. Job is a Hadoop job.
+ #
+ # Note: The following fields are mutually exclusive: `hadoop_job`, `spark_job`, `pyspark_job`, `hive_job`, `pig_job`, `spark_r_job`, `spark_sql_job`, `presto_job`, `trino_job`, `flink_job`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] spark_job
  # @return [::Google::Cloud::Dataproc::V1::SparkJob]
  # Optional. Job is a Spark job.
+ #
+ # Note: The following fields are mutually exclusive: `spark_job`, `hadoop_job`, `pyspark_job`, `hive_job`, `pig_job`, `spark_r_job`, `spark_sql_job`, `presto_job`, `trino_job`, `flink_job`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] pyspark_job
  # @return [::Google::Cloud::Dataproc::V1::PySparkJob]
  # Optional. Job is a PySpark job.
+ #
+ # Note: The following fields are mutually exclusive: `pyspark_job`, `hadoop_job`, `spark_job`, `hive_job`, `pig_job`, `spark_r_job`, `spark_sql_job`, `presto_job`, `trino_job`, `flink_job`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] hive_job
  # @return [::Google::Cloud::Dataproc::V1::HiveJob]
  # Optional. Job is a Hive job.
+ #
+ # Note: The following fields are mutually exclusive: `hive_job`, `hadoop_job`, `spark_job`, `pyspark_job`, `pig_job`, `spark_r_job`, `spark_sql_job`, `presto_job`, `trino_job`, `flink_job`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] pig_job
  # @return [::Google::Cloud::Dataproc::V1::PigJob]
  # Optional. Job is a Pig job.
+ #
+ # Note: The following fields are mutually exclusive: `pig_job`, `hadoop_job`, `spark_job`, `pyspark_job`, `hive_job`, `spark_r_job`, `spark_sql_job`, `presto_job`, `trino_job`, `flink_job`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] spark_r_job
  # @return [::Google::Cloud::Dataproc::V1::SparkRJob]
  # Optional. Job is a SparkR job.
+ #
+ # Note: The following fields are mutually exclusive: `spark_r_job`, `hadoop_job`, `spark_job`, `pyspark_job`, `hive_job`, `pig_job`, `spark_sql_job`, `presto_job`, `trino_job`, `flink_job`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] spark_sql_job
  # @return [::Google::Cloud::Dataproc::V1::SparkSqlJob]
  # Optional. Job is a SparkSql job.
+ #
+ # Note: The following fields are mutually exclusive: `spark_sql_job`, `hadoop_job`, `spark_job`, `pyspark_job`, `hive_job`, `pig_job`, `spark_r_job`, `presto_job`, `trino_job`, `flink_job`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] presto_job
  # @return [::Google::Cloud::Dataproc::V1::PrestoJob]
  # Optional. Job is a Presto job.
+ #
+ # Note: The following fields are mutually exclusive: `presto_job`, `hadoop_job`, `spark_job`, `pyspark_job`, `hive_job`, `pig_job`, `spark_r_job`, `spark_sql_job`, `trino_job`, `flink_job`. If a field in that set is populated, all other fields in the set will automatically be cleared.
+ # @!attribute [rw] trino_job
+ # @return [::Google::Cloud::Dataproc::V1::TrinoJob]
+ # Optional. Job is a Trino job.
+ #
+ # Note: The following fields are mutually exclusive: `trino_job`, `hadoop_job`, `spark_job`, `pyspark_job`, `hive_job`, `pig_job`, `spark_r_job`, `spark_sql_job`, `presto_job`, `flink_job`. If a field in that set is populated, all other fields in the set will automatically be cleared.
+ # @!attribute [rw] flink_job
+ # @return [::Google::Cloud::Dataproc::V1::FlinkJob]
+ # Optional. Job is a Flink job.
+ #
+ # Note: The following fields are mutually exclusive: `flink_job`, `hadoop_job`, `spark_job`, `pyspark_job`, `hive_job`, `pig_job`, `spark_r_job`, `spark_sql_job`, `presto_job`, `trino_job`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] labels
  # @return [::Google::Protobuf::Map{::String => ::String}]
  # Optional. The labels to associate with this job.
@@ -336,9 +406,13 @@ module Google
  # @!attribute [rw] regex
  # @return [::Google::Cloud::Dataproc::V1::RegexValidation]
  # Validation based on regular expressions.
+ #
+ # Note: The following fields are mutually exclusive: `regex`, `values`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  # @!attribute [rw] values
  # @return [::Google::Cloud::Dataproc::V1::ValueValidation]
  # Validation based on a list of allowed values.
+ #
+ # Note: The following fields are mutually exclusive: `values`, `regex`. If a field in that set is populated, all other fields in the set will automatically be cleared.
  class ParameterValidation
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
@@ -691,6 +765,11 @@ module Google
  # Output only. This token is included in the response if there are more
  # results to fetch. To fetch additional results, provide this value as the
  # page_token in a subsequent <code>ListWorkflowTemplatesRequest</code>.
+ # @!attribute [r] unreachable
+ # @return [::Array<::String>]
+ # Output only. List of workflow templates that could not be included in the
+ # response. Attempting to get one of these resources may indicate why it was
+ # not included in the list response.
  class ListWorkflowTemplatesResponse
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods