google-cloud-dataproc-v1 0.2.3 → 0.3.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: ded96fc15826c4bb164ff7ff7baa1a2ab0f091f767a81640511d0ac9d13dcb47
- data.tar.gz: da6259371c468e36daca488264f7e8b69dfb956fa76b894794f0c34e03027582
+ metadata.gz: b2e6d82df142cab884923a81f9daa310e1e52ec776cc97bf592a721017865eb3
+ data.tar.gz: d44cf35542e0d5ddb0c3a7b6e702e0f99a05c8a82766a00c9a20c42a671ab65a
  SHA512:
- metadata.gz: f90aad28ebf5a797520bd39b4a356c4cb681d09353a71d310305ace372b92d9d78fa7e876cbbed5ac28bef403757fbacb6fbd78bef52f6ec2b635e9b8681380e
- data.tar.gz: 33c193694266304e6e6a9067cfd968d886b02f9d3315ba2792a3aa8e61bcdce97dea7ba93c6aa7d2d97140a2bcfa1fdb3defd766cc5d0963f83771460b22c064
+ metadata.gz: 580c5bc331bcbc9ac33439101998103f366b3c4175500b9f36f97d11b3387c81fc8ec5e3ce2500aad20c77235a747950b2beac3ddcb055108564823abe746c8b
+ data.tar.gz: 294093cb4e0ccbacabe0dd6b05d92a0edf71b55e70428187e2bd7923748c4a0ac4528570394a3d57bf7849e89542fc98fe9518b25ff3e9b335e594def6d38e92
@@ -35,19 +35,19 @@ module Google
  self.service_name = 'google.cloud.dataproc.v1.AutoscalingPolicyService'

  # Creates new autoscaling policy.
- rpc :CreateAutoscalingPolicy, CreateAutoscalingPolicyRequest, AutoscalingPolicy
+ rpc :CreateAutoscalingPolicy, Google::Cloud::Dataproc::V1::CreateAutoscalingPolicyRequest, Google::Cloud::Dataproc::V1::AutoscalingPolicy
  # Updates (replaces) autoscaling policy.
  #
  # Disabled check for update_mask, because all updates will be full
  # replacements.
- rpc :UpdateAutoscalingPolicy, UpdateAutoscalingPolicyRequest, AutoscalingPolicy
+ rpc :UpdateAutoscalingPolicy, Google::Cloud::Dataproc::V1::UpdateAutoscalingPolicyRequest, Google::Cloud::Dataproc::V1::AutoscalingPolicy
  # Retrieves autoscaling policy.
- rpc :GetAutoscalingPolicy, GetAutoscalingPolicyRequest, AutoscalingPolicy
+ rpc :GetAutoscalingPolicy, Google::Cloud::Dataproc::V1::GetAutoscalingPolicyRequest, Google::Cloud::Dataproc::V1::AutoscalingPolicy
  # Lists autoscaling policies in the project.
- rpc :ListAutoscalingPolicies, ListAutoscalingPoliciesRequest, ListAutoscalingPoliciesResponse
+ rpc :ListAutoscalingPolicies, Google::Cloud::Dataproc::V1::ListAutoscalingPoliciesRequest, Google::Cloud::Dataproc::V1::ListAutoscalingPoliciesResponse
  # Deletes an autoscaling policy. It is an error to delete an autoscaling
  # policy that is in use by one or more clusters.
- rpc :DeleteAutoscalingPolicy, DeleteAutoscalingPolicyRequest, Google::Protobuf::Empty
+ rpc :DeleteAutoscalingPolicy, Google::Cloud::Dataproc::V1::DeleteAutoscalingPolicyRequest, Google::Protobuf::Empty
  end

  Stub = Service.rpc_stub_class
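The generated rpc declarations now spell out fully qualified constants. In Ruby, an unqualified constant resolves through the lexical nesting of the enclosing modules, so a same-named constant defined higher up the namespace can be picked up by mistake when load order varies. A minimal sketch of the hazard (the constants here are invented for illustration, not taken from this gem):

```ruby
module Google
  module Cloud
    # Imagine another library defined this first:
    AutoscalingPolicy = :unrelated_constant

    module Dataproc
      module V1
        module AutoscalingPolicyService
          # If Google::Cloud::Dataproc::V1::AutoscalingPolicy has not been
          # loaded yet, the unqualified name walks the lexical nesting and
          # finds Google::Cloud::AutoscalingPolicy instead.
          p AutoscalingPolicy   # => :unrelated_constant
        end
      end
    end
  end
end
```

Fully qualifying each request and response class, as the hunks above do, makes the lookup unambiguous regardless of what else has been loaded.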
@@ -73,7 +73,7 @@ module Google
  initial_delay: 0.1,
  max_delay: 60.0,
  multiplier: 1.3,
- retry_codes: ["DEADLINE_EXCEEDED", "UNAVAILABLE"]
+ retry_codes: [4, 14]
  }

  default_config.rpcs.get_autoscaling_policy.timeout = 600.0
@@ -81,7 +81,7 @@ module Google
  initial_delay: 0.1,
  max_delay: 60.0,
  multiplier: 1.3,
- retry_codes: ["DEADLINE_EXCEEDED", "UNAVAILABLE"]
+ retry_codes: [4, 14]
  }

  default_config.rpcs.list_autoscaling_policies.timeout = 600.0
@@ -89,7 +89,7 @@ module Google
  initial_delay: 0.1,
  max_delay: 60.0,
  multiplier: 1.3,
- retry_codes: ["DEADLINE_EXCEEDED", "UNAVAILABLE"]
+ retry_codes: [4, 14]
  }

  default_config.rpcs.delete_autoscaling_policy.timeout = 600.0
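Across all of the client-config hunks in this release, the symbolic gRPC status names in `retry_codes` were replaced by their numeric equivalents; the behavior is unchanged. The numbers are fixed by the gRPC specification, so the mapping used throughout this diff is:

```ruby
# gRPC status codes appearing in these retry_codes arrays
# (the canonical constants live in GRPC::Core::StatusCodes).
GRPC_STATUS_NAMES = {
  4  => "DEADLINE_EXCEEDED",
  13 => "INTERNAL",
  14 => "UNAVAILABLE"
}.freeze

# So the before/after forms are equivalent:
#   retry_codes: ["DEADLINE_EXCEEDED", "UNAVAILABLE"]  # 0.2.3
#   retry_codes: [4, 14]                               # 0.3.0
```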
@@ -69,7 +69,7 @@ module Google
  initial_delay: 0.1,
  max_delay: 60.0,
  multiplier: 1.3,
- retry_codes: ["UNAVAILABLE"]
+ retry_codes: [14]
  }

  default_config.rpcs.update_cluster.timeout = 300.0
@@ -77,7 +77,7 @@ module Google
  initial_delay: 0.1,
  max_delay: 60.0,
  multiplier: 1.3,
- retry_codes: ["UNAVAILABLE"]
+ retry_codes: [14]
  }

  default_config.rpcs.delete_cluster.timeout = 300.0
@@ -85,7 +85,7 @@ module Google
  initial_delay: 0.1,
  max_delay: 60.0,
  multiplier: 1.3,
- retry_codes: ["UNAVAILABLE"]
+ retry_codes: [14]
  }

  default_config.rpcs.get_cluster.timeout = 300.0
@@ -93,7 +93,7 @@ module Google
  initial_delay: 0.1,
  max_delay: 60.0,
  multiplier: 1.3,
- retry_codes: ["INTERNAL", "DEADLINE_EXCEEDED", "UNAVAILABLE"]
+ retry_codes: [13, 4, 14]
  }

  default_config.rpcs.list_clusters.timeout = 300.0
@@ -101,7 +101,7 @@ module Google
  initial_delay: 0.1,
  max_delay: 60.0,
  multiplier: 1.3,
- retry_codes: ["INTERNAL", "DEADLINE_EXCEEDED", "UNAVAILABLE"]
+ retry_codes: [13, 4, 14]
  }

  default_config.rpcs.diagnose_cluster.timeout = 300.0
@@ -109,7 +109,7 @@ module Google
  initial_delay: 0.1,
  max_delay: 60.0,
  multiplier: 1.3,
- retry_codes: ["UNAVAILABLE"]
+ retry_codes: [14]
  }

  default_config
@@ -25,6 +25,7 @@ Google::Protobuf::DescriptorPool.generated_pool.build do
  end
  add_message "google.cloud.dataproc.v1.ClusterConfig" do
  optional :config_bucket, :string, 1
+ optional :temp_bucket, :string, 2
  optional :gce_cluster_config, :message, 8, "google.cloud.dataproc.v1.GceClusterConfig"
  optional :master_config, :message, 9, "google.cloud.dataproc.v1.InstanceGroupConfig"
  optional :worker_config, :message, 10, "google.cloud.dataproc.v1.InstanceGroupConfig"
@@ -35,6 +36,11 @@ Google::Protobuf::DescriptorPool.generated_pool.build do
  optional :autoscaling_config, :message, 18, "google.cloud.dataproc.v1.AutoscalingConfig"
  optional :security_config, :message, 16, "google.cloud.dataproc.v1.SecurityConfig"
  optional :lifecycle_config, :message, 17, "google.cloud.dataproc.v1.LifecycleConfig"
+ optional :endpoint_config, :message, 19, "google.cloud.dataproc.v1.EndpointConfig"
+ end
+ add_message "google.cloud.dataproc.v1.EndpointConfig" do
+ map :http_ports, :string, :string, 1
+ optional :enable_http_port_access, :bool, 2
  end
  add_message "google.cloud.dataproc.v1.AutoscalingConfig" do
  optional :policy_uri, :string, 1
@@ -60,10 +66,16 @@ Google::Protobuf::DescriptorPool.generated_pool.build do
  optional :machine_type_uri, :string, 4
  optional :disk_config, :message, 5, "google.cloud.dataproc.v1.DiskConfig"
  optional :is_preemptible, :bool, 6
+ optional :preemptibility, :enum, 10, "google.cloud.dataproc.v1.InstanceGroupConfig.Preemptibility"
  optional :managed_group_config, :message, 7, "google.cloud.dataproc.v1.ManagedGroupConfig"
  repeated :accelerators, :message, 8, "google.cloud.dataproc.v1.AcceleratorConfig"
  optional :min_cpu_platform, :string, 9
  end
+ add_enum "google.cloud.dataproc.v1.InstanceGroupConfig.Preemptibility" do
+ value :PREEMPTIBILITY_UNSPECIFIED, 0
+ value :NON_PREEMPTIBLE, 1
+ value :PREEMPTIBLE, 2
+ end
  add_message "google.cloud.dataproc.v1.ManagedGroupConfig" do
  optional :instance_template_name, :string, 1
  optional :instance_group_manager_name, :string, 2
@@ -203,10 +215,12 @@ module Google
  module V1
  Cluster = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.dataproc.v1.Cluster").msgclass
  ClusterConfig = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.dataproc.v1.ClusterConfig").msgclass
+ EndpointConfig = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.dataproc.v1.EndpointConfig").msgclass
  AutoscalingConfig = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.dataproc.v1.AutoscalingConfig").msgclass
  EncryptionConfig = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.dataproc.v1.EncryptionConfig").msgclass
  GceClusterConfig = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.dataproc.v1.GceClusterConfig").msgclass
  InstanceGroupConfig = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.dataproc.v1.InstanceGroupConfig").msgclass
+ InstanceGroupConfig::Preemptibility = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.dataproc.v1.InstanceGroupConfig.Preemptibility").enummodule
  ManagedGroupConfig = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.dataproc.v1.ManagedGroupConfig").msgclass
  AcceleratorConfig = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.dataproc.v1.AcceleratorConfig").msgclass
  DiskConfig = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.dataproc.v1.DiskConfig").msgclass
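The descriptor changes above add one field (`temp_bucket`), one message (`EndpointConfig`), and one enum (`InstanceGroupConfig.Preemptibility`). A hedged sketch of how they surface through the generated message classes, assuming this gem is installed (bucket name and instance count are placeholders):

```ruby
require "google/cloud/dataproc/v1"

config = Google::Cloud::Dataproc::V1::ClusterConfig.new(
  temp_bucket: "my-temp-bucket",                    # new string field 2
  endpoint_config: Google::Cloud::Dataproc::V1::EndpointConfig.new(
    enable_http_port_access: true                   # http_ports is output only
  ),
  worker_config: Google::Cloud::Dataproc::V1::InstanceGroupConfig.new(
    num_instances: 2,
    preemptibility: :NON_PREEMPTIBLE                # new enum field 10
  )
)
```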
@@ -37,19 +37,19 @@ module Google
  # Creates a cluster in a project. The returned
  # [Operation.metadata][google.longrunning.Operation.metadata] will be
  # [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
- rpc :CreateCluster, CreateClusterRequest, Google::Longrunning::Operation
+ rpc :CreateCluster, Google::Cloud::Dataproc::V1::CreateClusterRequest, Google::Longrunning::Operation
  # Updates a cluster in a project. The returned
  # [Operation.metadata][google.longrunning.Operation.metadata] will be
  # [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
- rpc :UpdateCluster, UpdateClusterRequest, Google::Longrunning::Operation
+ rpc :UpdateCluster, Google::Cloud::Dataproc::V1::UpdateClusterRequest, Google::Longrunning::Operation
  # Deletes a cluster in a project. The returned
  # [Operation.metadata][google.longrunning.Operation.metadata] will be
  # [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
- rpc :DeleteCluster, DeleteClusterRequest, Google::Longrunning::Operation
+ rpc :DeleteCluster, Google::Cloud::Dataproc::V1::DeleteClusterRequest, Google::Longrunning::Operation
  # Gets the resource representation for a cluster in a project.
- rpc :GetCluster, GetClusterRequest, Cluster
+ rpc :GetCluster, Google::Cloud::Dataproc::V1::GetClusterRequest, Google::Cloud::Dataproc::V1::Cluster
  # Lists all regions/{region}/clusters in a project alphabetically.
- rpc :ListClusters, ListClustersRequest, ListClustersResponse
+ rpc :ListClusters, Google::Cloud::Dataproc::V1::ListClustersRequest, Google::Cloud::Dataproc::V1::ListClustersResponse
  # Gets cluster diagnostic information. The returned
  # [Operation.metadata][google.longrunning.Operation.metadata] will be
  # [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
@@ -57,7 +57,7 @@ module Google
  # [Operation.response][google.longrunning.Operation.response]
  # contains
  # [DiagnoseClusterResults](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#diagnoseclusterresults).
- rpc :DiagnoseCluster, DiagnoseClusterRequest, Google::Longrunning::Operation
+ rpc :DiagnoseCluster, Google::Cloud::Dataproc::V1::DiagnoseClusterRequest, Google::Longrunning::Operation
  end

  Stub = Service.rpc_stub_class
@@ -68,7 +68,7 @@ module Google
  initial_delay: 0.1,
  max_delay: 60.0,
  multiplier: 1.3,
- retry_codes: ["UNAVAILABLE"]
+ retry_codes: [14]
  }

  default_config.rpcs.submit_job_as_operation.timeout = 900.0
@@ -76,7 +76,7 @@ module Google
  initial_delay: 0.1,
  max_delay: 60.0,
  multiplier: 1.3,
- retry_codes: ["UNAVAILABLE"]
+ retry_codes: [14]
  }

  default_config.rpcs.get_job.timeout = 900.0
@@ -84,7 +84,7 @@ module Google
  initial_delay: 0.1,
  max_delay: 60.0,
  multiplier: 1.3,
- retry_codes: ["DEADLINE_EXCEEDED", "INTERNAL", "UNAVAILABLE"]
+ retry_codes: [4, 13, 14]
  }

  default_config.rpcs.list_jobs.timeout = 900.0
@@ -92,7 +92,7 @@ module Google
  initial_delay: 0.1,
  max_delay: 60.0,
  multiplier: 1.3,
- retry_codes: ["DEADLINE_EXCEEDED", "INTERNAL", "UNAVAILABLE"]
+ retry_codes: [4, 13, 14]
  }

  default_config.rpcs.update_job.timeout = 900.0
@@ -100,7 +100,7 @@ module Google
  initial_delay: 0.1,
  max_delay: 60.0,
  multiplier: 1.3,
- retry_codes: ["UNAVAILABLE"]
+ retry_codes: [14]
  }

  default_config.rpcs.cancel_job.timeout = 900.0
@@ -108,7 +108,7 @@ module Google
  initial_delay: 0.1,
  max_delay: 60.0,
  multiplier: 1.3,
- retry_codes: ["DEADLINE_EXCEEDED", "INTERNAL", "UNAVAILABLE"]
+ retry_codes: [4, 13, 14]
  }

  default_config.rpcs.delete_job.timeout = 900.0
@@ -116,7 +116,7 @@ module Google
  initial_delay: 0.1,
  max_delay: 60.0,
  multiplier: 1.3,
- retry_codes: ["UNAVAILABLE"]
+ retry_codes: [14]
  }

  default_config
@@ -34,24 +34,24 @@ module Google
  self.service_name = 'google.cloud.dataproc.v1.JobController'

  # Submits a job to a cluster.
- rpc :SubmitJob, SubmitJobRequest, Job
+ rpc :SubmitJob, Google::Cloud::Dataproc::V1::SubmitJobRequest, Google::Cloud::Dataproc::V1::Job
  # Submits job to a cluster.
- rpc :SubmitJobAsOperation, SubmitJobRequest, Google::Longrunning::Operation
+ rpc :SubmitJobAsOperation, Google::Cloud::Dataproc::V1::SubmitJobRequest, Google::Longrunning::Operation
  # Gets the resource representation for a job in a project.
- rpc :GetJob, GetJobRequest, Job
+ rpc :GetJob, Google::Cloud::Dataproc::V1::GetJobRequest, Google::Cloud::Dataproc::V1::Job
  # Lists regions/{region}/jobs in a project.
- rpc :ListJobs, ListJobsRequest, ListJobsResponse
+ rpc :ListJobs, Google::Cloud::Dataproc::V1::ListJobsRequest, Google::Cloud::Dataproc::V1::ListJobsResponse
  # Updates a job in a project.
- rpc :UpdateJob, UpdateJobRequest, Job
+ rpc :UpdateJob, Google::Cloud::Dataproc::V1::UpdateJobRequest, Google::Cloud::Dataproc::V1::Job
  # Starts a job cancellation request. To access the job resource
  # after cancellation, call
  # [regions/{region}/jobs.list](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/list)
  # or
  # [regions/{region}/jobs.get](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/get).
- rpc :CancelJob, CancelJobRequest, Job
+ rpc :CancelJob, Google::Cloud::Dataproc::V1::CancelJobRequest, Google::Cloud::Dataproc::V1::Job
  # Deletes the job from the project. If the job is active, the delete fails,
  # and the response returns `FAILED_PRECONDITION`.
- rpc :DeleteJob, DeleteJobRequest, Google::Protobuf::Empty
+ rpc :DeleteJob, Google::Cloud::Dataproc::V1::DeleteJobRequest, Google::Protobuf::Empty
  end

  Stub = Service.rpc_stub_class
@@ -21,7 +21,7 @@ module Google
  module Cloud
  module Dataproc
  module V1
- VERSION = "0.2.3"
+ VERSION = "0.3.0"
  end
  end
  end
@@ -71,7 +71,7 @@ module Google
  initial_delay: 0.1,
  max_delay: 60.0,
  multiplier: 1.3,
- retry_codes: ["UNAVAILABLE"]
+ retry_codes: [14]
  }

  default_config.rpcs.get_workflow_template.timeout = 600.0
@@ -79,7 +79,7 @@ module Google
  initial_delay: 0.1,
  max_delay: 60.0,
  multiplier: 1.3,
- retry_codes: ["DEADLINE_EXCEEDED", "INTERNAL", "UNAVAILABLE"]
+ retry_codes: [4, 13, 14]
  }

  default_config.rpcs.instantiate_workflow_template.timeout = 600.0
@@ -87,7 +87,7 @@ module Google
  initial_delay: 0.1,
  max_delay: 60.0,
  multiplier: 1.3,
- retry_codes: ["UNAVAILABLE"]
+ retry_codes: [14]
  }

  default_config.rpcs.instantiate_inline_workflow_template.timeout = 600.0
@@ -95,7 +95,7 @@ module Google
  initial_delay: 0.1,
  max_delay: 60.0,
  multiplier: 1.3,
- retry_codes: ["UNAVAILABLE"]
+ retry_codes: [14]
  }

  default_config.rpcs.update_workflow_template.timeout = 600.0
@@ -103,7 +103,7 @@ module Google
  initial_delay: 0.1,
  max_delay: 60.0,
  multiplier: 1.3,
- retry_codes: ["UNAVAILABLE"]
+ retry_codes: [14]
  }

  default_config.rpcs.list_workflow_templates.timeout = 600.0
@@ -111,7 +111,7 @@ module Google
  initial_delay: 0.1,
  max_delay: 60.0,
  multiplier: 1.3,
- retry_codes: ["DEADLINE_EXCEEDED", "INTERNAL", "UNAVAILABLE"]
+ retry_codes: [4, 13, 14]
  }

  default_config.rpcs.delete_workflow_template.timeout = 600.0
@@ -119,7 +119,7 @@ module Google
  initial_delay: 0.1,
  max_delay: 60.0,
  multiplier: 1.3,
- retry_codes: ["UNAVAILABLE"]
+ retry_codes: [14]
  }

  default_config
@@ -35,12 +35,12 @@ module Google
  self.service_name = 'google.cloud.dataproc.v1.WorkflowTemplateService'

  # Creates new workflow template.
- rpc :CreateWorkflowTemplate, CreateWorkflowTemplateRequest, WorkflowTemplate
+ rpc :CreateWorkflowTemplate, Google::Cloud::Dataproc::V1::CreateWorkflowTemplateRequest, Google::Cloud::Dataproc::V1::WorkflowTemplate
  # Retrieves the latest workflow template.
  #
  # Can retrieve previously instantiated template by specifying optional
  # version parameter.
- rpc :GetWorkflowTemplate, GetWorkflowTemplateRequest, WorkflowTemplate
+ rpc :GetWorkflowTemplate, Google::Cloud::Dataproc::V1::GetWorkflowTemplateRequest, Google::Cloud::Dataproc::V1::WorkflowTemplate
  # Instantiates a template and begins execution.
  #
  # The returned Operation can be used to track execution of
@@ -61,7 +61,7 @@ module Google
  # On successful completion,
  # [Operation.response][google.longrunning.Operation.response] will be
  # [Empty][google.protobuf.Empty].
- rpc :InstantiateWorkflowTemplate, InstantiateWorkflowTemplateRequest, Google::Longrunning::Operation
+ rpc :InstantiateWorkflowTemplate, Google::Cloud::Dataproc::V1::InstantiateWorkflowTemplateRequest, Google::Longrunning::Operation
  # Instantiates a template and begins execution.
  #
  # This method is equivalent to executing the sequence
@@ -86,14 +86,14 @@ module Google
  # On successful completion,
  # [Operation.response][google.longrunning.Operation.response] will be
  # [Empty][google.protobuf.Empty].
- rpc :InstantiateInlineWorkflowTemplate, InstantiateInlineWorkflowTemplateRequest, Google::Longrunning::Operation
+ rpc :InstantiateInlineWorkflowTemplate, Google::Cloud::Dataproc::V1::InstantiateInlineWorkflowTemplateRequest, Google::Longrunning::Operation
  # Updates (replaces) workflow template. The updated template
  # must contain version that matches the current server version.
- rpc :UpdateWorkflowTemplate, UpdateWorkflowTemplateRequest, WorkflowTemplate
+ rpc :UpdateWorkflowTemplate, Google::Cloud::Dataproc::V1::UpdateWorkflowTemplateRequest, Google::Cloud::Dataproc::V1::WorkflowTemplate
  # Lists workflows that match the specified filter in the request.
- rpc :ListWorkflowTemplates, ListWorkflowTemplatesRequest, ListWorkflowTemplatesResponse
+ rpc :ListWorkflowTemplates, Google::Cloud::Dataproc::V1::ListWorkflowTemplatesRequest, Google::Cloud::Dataproc::V1::ListWorkflowTemplatesResponse
  # Deletes a workflow template. It does not cancel in-progress workflows.
- rpc :DeleteWorkflowTemplate, DeleteWorkflowTemplateRequest, Google::Protobuf::Empty
+ rpc :DeleteWorkflowTemplate, Google::Cloud::Dataproc::V1::DeleteWorkflowTemplateRequest, Google::Protobuf::Empty
  end

  Stub = Service.rpc_stub_class
@@ -80,20 +80,26 @@ module Google
  # Bounds: [0s, 1d].
  # @!attribute [rw] scale_up_factor
  # @return [::Float]
- # Required. Fraction of average pending memory in the last cooldown period
+ # Required. Fraction of average YARN pending memory in the last cooldown period
  # for which to add workers. A scale-up factor of 1.0 will result in scaling
  # up so that there is no pending memory remaining after the update (more
  # aggressive scaling). A scale-up factor closer to 0 will result in a smaller
  # magnitude of scaling up (less aggressive scaling).
+ # See [How autoscaling
+ # works](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works)
+ # for more information.
  #
  # Bounds: [0.0, 1.0].
  # @!attribute [rw] scale_down_factor
  # @return [::Float]
- # Required. Fraction of average pending memory in the last cooldown period
+ # Required. Fraction of average YARN pending memory in the last cooldown period
  # for which to remove workers. A scale-down factor of 1 will result in
  # scaling down so that there is no available memory remaining after the
  # update (more aggressive scaling). A scale-down factor of 0 disables
  # removing workers, which can be beneficial for autoscaling a single job.
+ # See [How autoscaling
+ # works](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works)
+ # for more information.
  #
  # Bounds: [0.0, 1.0].
  # @!attribute [rw] scale_up_min_worker_fraction
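To make the factor semantics concrete, here is an illustrative calculation with invented numbers (the authoritative description is the "How autoscaling works" page linked in the doc comments above):

```ruby
# Illustrative arithmetic only, not the exact autoscaler implementation.
avg_pending_memory_gb = 10.0  # average YARN pending memory over the cooldown period
scale_up_factor       = 0.5   # from the autoscaling policy

memory_to_add_gb     = avg_pending_memory_gb * scale_up_factor          # => 5.0
memory_per_worker_gb = 8.0    # depends on machine type and YARN config
workers_to_add       = (memory_to_add_gb / memory_per_worker_gb).ceil   # => 1
```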
@@ -85,6 +85,17 @@ module Google
  # and manage this project-level, per-location bucket (see
  # [Dataproc staging
  # bucket](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)).
+ # @!attribute [rw] temp_bucket
+ # @return [::String]
+ # Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data,
+ # such as Spark and MapReduce history files.
+ # If you do not specify a temp bucket,
+ # Dataproc will determine a Cloud Storage location (US,
+ # ASIA, or EU) for your cluster's temp bucket according to the
+ # Compute Engine zone where your cluster is deployed, and then create
+ # and manage this project-level, per-location bucket. The default bucket has
+ # a TTL of 90 days, but you can use any TTL (or none) if you specify a
+ # bucket.
  # @!attribute [rw] gce_cluster_config
  # @return [::Google::Cloud::Dataproc::V1::GceClusterConfig]
  # Optional. The shared Compute Engine config settings for
@@ -132,11 +143,37 @@ module Google
  # @!attribute [rw] lifecycle_config
  # @return [::Google::Cloud::Dataproc::V1::LifecycleConfig]
  # Optional. Lifecycle setting for the cluster.
+ # @!attribute [rw] endpoint_config
+ # @return [::Google::Cloud::Dataproc::V1::EndpointConfig]
+ # Optional. Port/endpoint configuration for this cluster
  class ClusterConfig
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
  end

+ # Endpoint config for this cluster
+ # @!attribute [r] http_ports
+ # @return [::Google::Protobuf::Map{::String => ::String}]
+ # Output only. The map of port descriptions to URLs. Will only be populated
+ # if enable_http_port_access is true.
+ # @!attribute [rw] enable_http_port_access
+ # @return [::Boolean]
+ # Optional. If true, enable http access to specific ports on the cluster
+ # from external sources. Defaults to false.
+ class EndpointConfig
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+
+ # @!attribute [rw] key
+ # @return [::String]
+ # @!attribute [rw] value
+ # @return [::String]
+ class HttpPortsEntry
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+ end
+
  # Autoscaling Policy config associated with the cluster.
  # @!attribute [rw] policy_uri
  # @return [::String]
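A sketch of using the new `EndpointConfig` end to end, assuming a cluster created with `enable_http_port_access` set and default credentials configured; project, region, and cluster names are placeholders:

```ruby
require "google/cloud/dataproc/v1"

client  = Google::Cloud::Dataproc::V1::ClusterController::Client.new
cluster = client.get_cluster project_id:   "my-project",
                             region:       "us-central1",
                             cluster_name: "my-cluster"

# http_ports is output only: a map of component web UI names to URLs,
# populated only when enable_http_port_access is true.
cluster.config.endpoint_config&.http_ports&.each do |name, url|
  puts "#{name}: #{url}"
end
```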
@@ -214,7 +251,7 @@ module Google
  # @!attribute [rw] service_account
  # @return [::String]
  # Optional. The [Dataproc service
- # account](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_cloud_dataproc)
+ # account](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc)
  # (also see [VM Data Plane
  # identity](https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity))
  # used by Dataproc cluster VM instances to access Google Cloud Platform
@@ -318,6 +355,15 @@ module Google
  # @return [::Boolean]
  # Output only. Specifies that this instance group contains preemptible
  # instances.
+ # @!attribute [rw] preemptibility
+ # @return [::Google::Cloud::Dataproc::V1::InstanceGroupConfig::Preemptibility]
+ # Optional. Specifies the preemptibility of the instance group.
+ #
+ # The default value for master and worker groups is
+ # `NON_PREEMPTIBLE`. This default cannot be changed.
+ #
+ # The default value for secondary instances is
+ # `PREEMPTIBLE`.
  # @!attribute [r] managed_group_config
  # @return [::Google::Cloud::Dataproc::V1::ManagedGroupConfig]
  # Output only. The config for Compute Engine Instance Group
@@ -335,6 +381,27 @@ module Google
  class InstanceGroupConfig
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
+
+ # Controls the use of
+ # [preemptible instances]
+ # (https://cloud.google.com/compute/docs/instances/preemptible)
+ # within the group.
+ module Preemptibility
+ # Preemptibility is unspecified, the system will choose the
+ # appropriate setting for each instance group.
+ PREEMPTIBILITY_UNSPECIFIED = 0
+
+ # Instances are non-preemptible.
+ #
+ # This option is allowed for all instance groups and is the only valid
+ # value for Master and Worker instance groups.
+ NON_PREEMPTIBLE = 1
+
+ # Instances are preemptible.
+ #
+ # This option is allowed only for secondary worker groups.
+ PREEMPTIBLE = 2
+ end
  end

  # Specifies the resources used to actively manage an instance group.
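Per the documentation above, `PREEMPTIBLE` is only valid for secondary worker groups; master and primary worker groups must stay `NON_PREEMPTIBLE`. A sketch of the intended usage, assuming the `secondary_worker_config` field on `ClusterConfig` (instance count is a placeholder):

```ruby
config = Google::Cloud::Dataproc::V1::ClusterConfig.new(
  # Only the secondary worker group may opt in to preemptible instances.
  secondary_worker_config: Google::Cloud::Dataproc::V1::InstanceGroupConfig.new(
    num_instances:  4,
    preemptibility: :PREEMPTIBLE
  )
)
```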
@@ -567,7 +634,7 @@ module Google
  # @return [::String]
  # Optional. The version of software inside the cluster. It must be one of the
  # supported [Dataproc
- # Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
+ # Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_dataproc_versions),
  # such as "1.2" (including a subminor version, such as "1.2.29"), or the
  # ["preview"
  # version](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
@@ -153,12 +153,12 @@ module Google
  # Spark driver and tasks.
  # @!attribute [rw] file_uris
  # @return [::Array<::String>]
- # Optional. HCFS URIs of files to be copied to the working directory of
- # Spark drivers and distributed tasks. Useful for naively parallel tasks.
+ # Optional. HCFS URIs of files to be placed in the working directory of
+ # each executor. Useful for naively parallel tasks.
  # @!attribute [rw] archive_uris
  # @return [::Array<::String>]
- # Optional. HCFS URIs of archives to be extracted in the working directory
- # of Spark drivers and tasks. Supported file types:
+ # Optional. HCFS URIs of archives to be extracted into the working directory
+ # of each executor. Supported file types:
  # .jar, .tar, .tar.gz, .tgz, and .zip.
  # @!attribute [rw] properties
  # @return [::Google::Protobuf::Map{::String => ::String}]
@@ -206,11 +206,12 @@ module Google
  # Python driver and tasks.
  # @!attribute [rw] file_uris
  # @return [::Array<::String>]
- # Optional. HCFS URIs of files to be copied to the working directory of
- # Python drivers and distributed tasks. Useful for naively parallel tasks.
+ # Optional. HCFS URIs of files to be placed in the working directory of
+ # each executor. Useful for naively parallel tasks.
  # @!attribute [rw] archive_uris
  # @return [::Array<::String>]
- # Optional. HCFS URIs of archives to be extracted in the working directory of
+ # Optional. HCFS URIs of archives to be extracted into the working directory
+ # of each executor. Supported file types:
  # .jar, .tar, .tar.gz, .tgz, and .zip.
  # @!attribute [rw] properties
  # @return [::Google::Protobuf::Map{::String => ::String}]
@@ -421,12 +422,12 @@ module Google
  # occur that causes an incorrect job submission.
  # @!attribute [rw] file_uris
  # @return [::Array<::String>]
- # Optional. HCFS URIs of files to be copied to the working directory of
- # R drivers and distributed tasks. Useful for naively parallel tasks.
+ # Optional. HCFS URIs of files to be placed in the working directory of
+ # each executor. Useful for naively parallel tasks.
  # @!attribute [rw] archive_uris
  # @return [::Array<::String>]
- # Optional. HCFS URIs of archives to be extracted in the working directory of
- # Spark drivers and tasks. Supported file types:
+ # Optional. HCFS URIs of archives to be extracted into the working directory
+ # of each executor. Supported file types:
  # .jar, .tar, .tar.gz, .tgz, and .zip.
  # @!attribute [rw] properties
  # @return [::Google::Protobuf::Map{::String => ::String}]
@@ -595,8 +596,8 @@ module Google
  # Encapsulates the full scoping used to reference a job.
  # @!attribute [rw] project_id
  # @return [::String]
- # Required. The ID of the Google Cloud Platform project that the job
- # belongs to.
+ # Optional. The ID of the Google Cloud Platform project that the job belongs to. If
+ # specified, must match the request project ID.
  # @!attribute [rw] job_id
  # @return [::String]
  # Optional. The job ID, which must be unique within the project.
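With `project_id` now optional on `JobReference`, a caller that wants a deterministic job ID can supply just the ID. A hedged sketch (cluster and job names are placeholders):

```ruby
job = Google::Cloud::Dataproc::V1::Job.new(
  reference: Google::Cloud::Dataproc::V1::JobReference.new(
    job_id: "my-unique-job-id"  # project_id may be omitted; if set, it
                                # must match the request's project ID
  ),
  placement: Google::Cloud::Dataproc::V1::JobPlacement.new(
    cluster_name: "my-cluster"
  )
)
```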
@@ -23,7 +23,7 @@ module Google
  module V1
  # Cluster components that can be activated.
  module Component
- # Unspecified component.
+ # Unspecified component. Specifying this will cause Cluster creation to fail.
  COMPONENT_UNSPECIFIED = 0

  # The Anaconda python distribution.
@@ -75,7 +75,7 @@ module Google
  # Required. The Directed Acyclic Graph of Jobs to submit.
  # @!attribute [rw] parameters
  # @return [::Array<::Google::Cloud::Dataproc::V1::TemplateParameter>]
- # Optional. emplate parameters whose values are substituted into the
+ # Optional. Template parameters whose values are substituted into the
  # template. Values for parameters must be provided when the template is
  # instantiated.
  class WorkflowTemplate
@@ -189,22 +189,28 @@ module Google
  # or hyphen. Must consist of between 3 and 50 characters.
  # @!attribute [rw] hadoop_job
  # @return [::Google::Cloud::Dataproc::V1::HadoopJob]
+ # Optional. Job is a Hadoop job.
  # @!attribute [rw] spark_job
  # @return [::Google::Cloud::Dataproc::V1::SparkJob]
+ # Optional. Job is a Spark job.
  # @!attribute [rw] pyspark_job
  # @return [::Google::Cloud::Dataproc::V1::PySparkJob]
+ # Optional. Job is a PySpark job.
  # @!attribute [rw] hive_job
  # @return [::Google::Cloud::Dataproc::V1::HiveJob]
+ # Optional. Job is a Hive job.
  # @!attribute [rw] pig_job
  # @return [::Google::Cloud::Dataproc::V1::PigJob]
+ # Optional. Job is a Pig job.
  # @!attribute [rw] spark_r_job
  # @return [::Google::Cloud::Dataproc::V1::SparkRJob]
- # Spark R job
+ # Optional. Job is a SparkR job.
  # @!attribute [rw] spark_sql_job
  # @return [::Google::Cloud::Dataproc::V1::SparkSqlJob]
+ # Optional. Job is a SparkSql job.
  # @!attribute [rw] presto_job
  # @return [::Google::Cloud::Dataproc::V1::PrestoJob]
- # Presto job
+ # Optional. Job is a Presto job.
  # @!attribute [rw] labels
  # @return [::Google::Protobuf::Map{::String => ::String}]
  # Optional. The labels to associate with this job.
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: google-cloud-dataproc-v1
  version: !ruby/object:Gem::Version
- version: 0.2.3
+ version: 0.3.0
  platform: ruby
  authors:
  - Google LLC
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2020-06-18 00:00:00.000000000 Z
+ date: 2020-08-06 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: gapic-common
@@ -16,14 +16,14 @@ dependencies:
  requirements:
  - - "~>"
  - !ruby/object:Gem::Version
- version: '0.2'
+ version: '0.3'
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
  requirements:
  - - "~>"
  - !ruby/object:Gem::Version
- version: '0.2'
+ version: '0.3'
  - !ruby/object:Gem::Dependency
  name: google-cloud-errors
  requirement: !ruby/object:Gem::Requirement