aws-sdk-glue 1.138.0 → 1.139.0
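Projects that want the new Ray Runtime field described in this release need at least this gem version. A minimal version pin, assuming Bundler is used:

    # Gemfile
    gem 'aws-sdk-glue', '~> 1.139'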

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: a445399d9ad92da03f0c9ed23469d07028fdecccec4cfd4806384aa7cca43f8e
- data.tar.gz: 0b476a7ad5670f67b586a43c522cea358b4d5643269f4c6e4046c7487a030cf5
+ metadata.gz: 1bd041bc46f2d935400f92415f17c4c06c164ad094d739eb69883e13e4b9c7c5
+ data.tar.gz: 192981a8963452e7aa09501ecf77b0df8f271411440d4579a949ad4321fb50a9
  SHA512:
- metadata.gz: 72cdb6ec7fbbe2415f8dc4160de1de4bce8bef9a49ee3e904c4f28bcadab3fd5487315c1e13f1c7cca085574829f4fa8c4d17d052f5430d533456c1e3225fe4a
- data.tar.gz: 78a0bf89f8806df3006fe9d334167b6cab042cd746fcfbbd3fea7737d5849d3d79d3776cbc93fb2d04b2d96096e0ee6dfbad7f85e5448ccfbb5b9d090168d908
+ metadata.gz: 99877713a3c5d842165d26438011951056941ad9472f02d082fb674df3bff08e131ad20e7ef463cfe945e90d52bc5aa658cc3d0af8e8bd0924c7aaf158c965dd
+ data.tar.gz: d8a5b642a7049875f2160be7892edb0a0379e3c18cdc0257c24536ee67bcd907d6f09adb99885a2aa1623bef897723b5aa4be731f6be7479a1c3b5192972cafc
data/CHANGELOG.md CHANGED
@@ -1,6 +1,11 @@
  Unreleased Changes
  ------------------

+ 1.139.0 (2023-05-30)
+ ------------------
+
+ * Feature - Added Runtime parameter to allow selection of Ray Runtime
+
  1.138.0 (2023-05-25)
  ------------------

data/VERSION CHANGED
@@ -1 +1 @@
- 1.138.0
+ 1.139.0
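The changelog entry above adds a Runtime field to the job command for Ray jobs; the hunks below show it threaded through create_job, get_job, and the JobCommand shape. A minimal, hedged sketch of creating a Ray job with the new field; the role ARN, script path, and the "Ray2.4" runtime string are illustrative placeholders, not values taken from this diff:

    require 'aws-sdk-glue'

    glue = Aws::Glue::Client.new(region: 'us-east-1')

    # Ray jobs use the "glueray" command name; their Ray/Python versions come
    # from the new :runtime field, while :glue_version must be "4.0" or greater.
    glue.create_job(
      name: 'my-ray-job',                                     # placeholder
      role: 'arn:aws:iam::123456789012:role/GlueJobRole',     # placeholder
      glue_version: '4.0',
      worker_type: 'Z.2X',                                    # Ray worker type
      number_of_workers: 5,
      command: {
        name: 'glueray',
        runtime: 'Ray2.4',                                    # example runtime id
        python_version: '3.9',                                # assumed for Ray
        script_location: 's3://my-bucket/scripts/process.py'  # placeholder
      }
    )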
@@ -960,7 +960,7 @@ module Aws::Glue
  # resp.dev_endpoints[0].zeppelin_remote_spark_interpreter_port #=> Integer
  # resp.dev_endpoints[0].public_address #=> String
  # resp.dev_endpoints[0].status #=> String
- # resp.dev_endpoints[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X"
+ # resp.dev_endpoints[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X", "Z.2X"
  # resp.dev_endpoints[0].glue_version #=> String
  # resp.dev_endpoints[0].number_of_workers #=> Integer
  # resp.dev_endpoints[0].number_of_nodes #=> Integer
@@ -1024,6 +1024,7 @@ module Aws::Glue
  # resp.jobs[0].command.name #=> String
  # resp.jobs[0].command.script_location #=> String
  # resp.jobs[0].command.python_version #=> String
+ # resp.jobs[0].command.runtime #=> String
  # resp.jobs[0].default_arguments #=> Hash
  # resp.jobs[0].default_arguments["GenericString"] #=> String
  # resp.jobs[0].non_overridable_arguments #=> Hash
@@ -1034,7 +1035,7 @@ module Aws::Glue
  # resp.jobs[0].allocated_capacity #=> Integer
  # resp.jobs[0].timeout #=> Integer
  # resp.jobs[0].max_capacity #=> Float
- # resp.jobs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X"
+ # resp.jobs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X", "Z.2X"
  # resp.jobs[0].number_of_workers #=> Integer
  # resp.jobs[0].security_configuration #=> String
  # resp.jobs[0].notification_property.notify_delay_after #=> Integer
@@ -2067,7 +2068,7 @@ module Aws::Glue
  # resp.workflows[0].last_run.graph.nodes[0].job_details.job_runs[0].execution_time #=> Integer
  # resp.workflows[0].last_run.graph.nodes[0].job_details.job_runs[0].timeout #=> Integer
  # resp.workflows[0].last_run.graph.nodes[0].job_details.job_runs[0].max_capacity #=> Float
- # resp.workflows[0].last_run.graph.nodes[0].job_details.job_runs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X"
+ # resp.workflows[0].last_run.graph.nodes[0].job_details.job_runs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X", "Z.2X"
  # resp.workflows[0].last_run.graph.nodes[0].job_details.job_runs[0].number_of_workers #=> Integer
  # resp.workflows[0].last_run.graph.nodes[0].job_details.job_runs[0].security_configuration #=> String
  # resp.workflows[0].last_run.graph.nodes[0].job_details.job_runs[0].log_group_name #=> String
@@ -2135,7 +2136,7 @@ module Aws::Glue
  # resp.workflows[0].graph.nodes[0].job_details.job_runs[0].execution_time #=> Integer
  # resp.workflows[0].graph.nodes[0].job_details.job_runs[0].timeout #=> Integer
  # resp.workflows[0].graph.nodes[0].job_details.job_runs[0].max_capacity #=> Float
- # resp.workflows[0].graph.nodes[0].job_details.job_runs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X"
+ # resp.workflows[0].graph.nodes[0].job_details.job_runs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X", "Z.2X"
  # resp.workflows[0].graph.nodes[0].job_details.job_runs[0].number_of_workers #=> Integer
  # resp.workflows[0].graph.nodes[0].job_details.job_runs[0].security_configuration #=> String
  # resp.workflows[0].graph.nodes[0].job_details.job_runs[0].log_group_name #=> String
@@ -3112,7 +3113,7 @@ module Aws::Glue
  # public_key: "GenericString",
  # public_keys: ["GenericString"],
  # number_of_nodes: 1,
- # worker_type: "Standard", # accepts Standard, G.1X, G.2X, G.025X, G.4X, G.8X
+ # worker_type: "Standard", # accepts Standard, G.1X, G.2X, G.025X, G.4X, G.8X, Z.2X
  # glue_version: "GlueVersionString",
  # number_of_workers: 1,
  # extra_python_libs_s3_path: "GenericString",
@@ -3137,7 +3138,7 @@ module Aws::Glue
  # resp.yarn_endpoint_address #=> String
  # resp.zeppelin_remote_spark_interpreter_port #=> Integer
  # resp.number_of_nodes #=> Integer
- # resp.worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X"
+ # resp.worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X", "Z.2X"
  # resp.glue_version #=> String
  # resp.number_of_workers #=> Integer
  # resp.availability_zone #=> String
@@ -3183,7 +3184,8 @@ module Aws::Glue
  # The `JobCommand` that runs this job.
  #
  # @option params [Hash<String,String>] :default_arguments
- # The default arguments for this job.
+ # The default arguments for every run of this job, specified as
+ # name-value pairs.
  #
  # You can specify arguments here that your own job-execution script
  # consumes, as well as arguments that Glue itself consumes.
@@ -3197,17 +3199,23 @@ module Aws::Glue
  # arguments, see the [Calling Glue APIs in Python][1] topic in the
  # developer guide.
  #
- # For information about the key-value pairs that Glue consumes to set up
- # your job, see the [Special Parameters Used by Glue][2] topic in the
+ # For information about the arguments you can provide to this field when
+ # configuring Spark jobs, see the [Special Parameters Used by Glue][2]
+ # topic in the developer guide.
+ #
+ # For information about the arguments you can provide to this field when
+ # configuring Ray jobs, see [Using job parameters in Ray jobs][3] in the
  # developer guide.
  #
  #
  #
  # [1]: https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html
  # [2]: https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html
+ # [3]: https://docs.aws.amazon.com/glue/latest/dg/author-job-ray-job-parameters.html
  #
  # @option params [Hash<String,String>] :non_overridable_arguments
- # Non-overridable arguments for this job, specified as name-value pairs.
+ # Arguments for this job that are not overridden when providing job
+ # arguments in a job run, specified as name-value pairs.
  #
  # @option params [Types::ConnectionsList] :connections
  # The connections used for this job.
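The reworded :default_arguments and :non_overridable_arguments docs above distinguish per-run defaults from fixed arguments. A small sketch of how both are passed to create_job; the argument names and S3 paths are user-defined placeholders, not reserved Glue parameters:

    require 'aws-sdk-glue'

    glue = Aws::Glue::Client.new(region: 'us-east-1')

    glue.create_job(
      name: 'nightly-etl',                                    # placeholder
      role: 'arn:aws:iam::123456789012:role/GlueJobRole',     # placeholder
      command: { name: 'glueetl', script_location: 's3://my-bucket/etl.py' },
      default_arguments: {
        '--input_path'  => 's3://my-bucket/raw/',             # read by the script
        '--output_path' => 's3://my-bucket/curated/'
      },
      non_overridable_arguments: {
        '--environment' => 'production'                       # fixed across runs
      }
    )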
@@ -3238,12 +3246,17 @@ module Aws::Glue
  # the number of Glue data processing units (DPUs) that can be allocated
  # when this job runs. A DPU is a relative measure of processing power
  # that consists of 4 vCPUs of compute capacity and 16 GB of memory. For
- # more information, see the [Glue pricing page][1].
+ # more information, see the [ Glue pricing page][1].
+ #
+ # For Glue version 2.0+ jobs, you cannot specify a `Maximum capacity`.
+ # Instead, you should specify a `Worker type` and the `Number of
+ # workers`.
  #
- # Do not set `Max Capacity` if using `WorkerType` and `NumberOfWorkers`.
+ # Do not set `MaxCapacity` if using `WorkerType` and `NumberOfWorkers`.
  #
  # The value that can be allocated for `MaxCapacity` depends on whether
- # you are running a Python shell job or an Apache Spark ETL job:
+ # you are running a Python shell job, an Apache Spark ETL job, or an
+ # Apache Spark streaming ETL job:
  #
  # * When you specify a Python shell job
  # (`JobCommand.Name`="pythonshell"), you can allocate either 0.0625
@@ -3251,14 +3264,10 @@ module Aws::Glue
  #
  # * When you specify an Apache Spark ETL job
  # (`JobCommand.Name`="glueetl") or Apache Spark streaming ETL job
- # (`JobCommand.Name`="gluestreaming"), you can allocate a minimum of
- # 2 DPUs. The default is 10 DPUs. This job type cannot have a
+ # (`JobCommand.Name`="gluestreaming"), you can allocate from 2 to
+ # 100 DPUs. The default is 10 DPUs. This job type cannot have a
  # fractional DPU allocation.
  #
- # For Glue version 2.0 jobs, you cannot instead specify a `Maximum
- # capacity`. Instead, you should specify a `Worker type` and the `Number
- # of workers`.
- #
  #
  #
  # [1]: https://aws.amazon.com/glue/pricing/
@@ -3280,9 +3289,13 @@ module Aws::Glue
  # Specifies configuration properties of a job notification.
  #
  # @option params [String] :glue_version
- # Glue version determines the versions of Apache Spark and Python that
- # Glue supports. The Python version indicates the version supported for
- # jobs of type Spark.
+ # In Spark jobs, `GlueVersion` determines the versions of Apache Spark
+ # and Python that Glue available in a job. The Python version indicates
+ # the version supported for jobs of type Spark.
+ #
+ # Ray jobs should set `GlueVersion` to `4.0` or greater. However, the
+ # versions of Ray, Python and additional libraries available in your Ray
+ # job are determined by the `Runtime` parameter of the Job command.
  #
  # For more information about the available Glue versions and
  # corresponding Spark and Python versions, see [Glue version][1] in the
@@ -3301,7 +3314,8 @@ module Aws::Glue
  #
  # @option params [String] :worker_type
  # The type of predefined worker that is allocated when a job runs.
- # Accepts a value of Standard, G.1X, G.2X, or G.025X.
+ # Accepts a value of Standard, G.1X, G.2X, or G.025X for Spark jobs.
+ # Accepts the value Z.2X for Ray jobs.
  #
  # * For the `Standard` worker type, each worker provides 4 vCPU, 16 GB
  # of memory and a 50GB disk, and 2 executors per worker.
@@ -3319,6 +3333,10 @@ module Aws::Glue
  # recommend this worker type for low volume streaming jobs. This
  # worker type is only available for Glue version 3.0 streaming jobs.
  #
+ # * For the `Z.2X` worker type, each worker maps to 2 M-DPU (8vCPU, 64
+ # GB of m emory, 128 GB disk), and provides up to 8 Ray workers based
+ # on the autoscaler.
+ #
  # @option params [Hash<String,Types::CodeGenConfigurationNode>] :code_gen_configuration_nodes
  # The representation of a directed acyclic graph on which both the Glue
  # Studio visual component and Glue Studio code generation is based.
@@ -3539,7 +3557,7 @@ module Aws::Glue
  # role: "RoleString", # required
  # glue_version: "GlueVersionString",
  # max_capacity: 1.0,
- # worker_type: "Standard", # accepts Standard, G.1X, G.2X, G.025X, G.4X, G.8X
+ # worker_type: "Standard", # accepts Standard, G.1X, G.2X, G.025X, G.4X, G.8X, Z.2X
  # number_of_workers: 1,
  # timeout: 1,
  # max_retries: 1,
@@ -4113,7 +4131,7 @@ module Aws::Glue
  # },
  # max_capacity: 1.0,
  # number_of_workers: 1,
- # worker_type: "Standard", # accepts Standard, G.1X, G.2X, G.025X, G.4X, G.8X
+ # worker_type: "Standard", # accepts Standard, G.1X, G.2X, G.025X, G.4X, G.8X, Z.2X
  # security_configuration: "NameString",
  # glue_version: "GlueVersionString",
  # tags: {
@@ -6638,7 +6656,7 @@ module Aws::Glue
6638
6656
  # resp.dev_endpoint.zeppelin_remote_spark_interpreter_port #=> Integer
6639
6657
  # resp.dev_endpoint.public_address #=> String
6640
6658
  # resp.dev_endpoint.status #=> String
6641
- # resp.dev_endpoint.worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X"
6659
+ # resp.dev_endpoint.worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X", "Z.2X"
6642
6660
  # resp.dev_endpoint.glue_version #=> String
6643
6661
  # resp.dev_endpoint.number_of_workers #=> Integer
6644
6662
  # resp.dev_endpoint.number_of_nodes #=> Integer
@@ -6709,7 +6727,7 @@ module Aws::Glue
6709
6727
  # resp.dev_endpoints[0].zeppelin_remote_spark_interpreter_port #=> Integer
6710
6728
  # resp.dev_endpoints[0].public_address #=> String
6711
6729
  # resp.dev_endpoints[0].status #=> String
6712
- # resp.dev_endpoints[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X"
6730
+ # resp.dev_endpoints[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X", "Z.2X"
6713
6731
  # resp.dev_endpoints[0].glue_version #=> String
6714
6732
  # resp.dev_endpoints[0].number_of_workers #=> Integer
6715
6733
  # resp.dev_endpoints[0].number_of_nodes #=> Integer
@@ -6765,6 +6783,7 @@ module Aws::Glue
  # resp.job.command.name #=> String
  # resp.job.command.script_location #=> String
  # resp.job.command.python_version #=> String
+ # resp.job.command.runtime #=> String
  # resp.job.default_arguments #=> Hash
  # resp.job.default_arguments["GenericString"] #=> String
  # resp.job.non_overridable_arguments #=> Hash
@@ -6775,7 +6794,7 @@ module Aws::Glue
  # resp.job.allocated_capacity #=> Integer
  # resp.job.timeout #=> Integer
  # resp.job.max_capacity #=> Float
- # resp.job.worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X"
+ # resp.job.worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X", "Z.2X"
  # resp.job.number_of_workers #=> Integer
  # resp.job.security_configuration #=> String
  # resp.job.notification_property.notify_delay_after #=> Integer
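The get_job response above now echoes the runtime back alongside the worker type. A minimal read-back sketch; the job name is a placeholder and the printed values depend on how the job was created:

    require 'aws-sdk-glue'

    glue = Aws::Glue::Client.new(region: 'us-east-1')
    resp = glue.get_job(job_name: 'my-ray-job')   # hypothetical job name

    puts resp.job.command.name      #=> e.g. "glueray"
    puts resp.job.command.runtime   #=> the Ray runtime string, if one was set
    puts resp.job.worker_type       #=> e.g. "Z.2X" for a Ray job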
@@ -7646,7 +7665,7 @@ module Aws::Glue
7646
7665
  # resp.job_run.execution_time #=> Integer
7647
7666
  # resp.job_run.timeout #=> Integer
7648
7667
  # resp.job_run.max_capacity #=> Float
7649
- # resp.job_run.worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X"
7668
+ # resp.job_run.worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X", "Z.2X"
7650
7669
  # resp.job_run.number_of_workers #=> Integer
7651
7670
  # resp.job_run.security_configuration #=> String
7652
7671
  # resp.job_run.log_group_name #=> String
@@ -7712,7 +7731,7 @@ module Aws::Glue
7712
7731
  # resp.job_runs[0].execution_time #=> Integer
7713
7732
  # resp.job_runs[0].timeout #=> Integer
7714
7733
  # resp.job_runs[0].max_capacity #=> Float
7715
- # resp.job_runs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X"
7734
+ # resp.job_runs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X", "Z.2X"
7716
7735
  # resp.job_runs[0].number_of_workers #=> Integer
7717
7736
  # resp.job_runs[0].security_configuration #=> String
7718
7737
  # resp.job_runs[0].log_group_name #=> String
@@ -7766,6 +7785,7 @@ module Aws::Glue
7766
7785
  # resp.jobs[0].command.name #=> String
7767
7786
  # resp.jobs[0].command.script_location #=> String
7768
7787
  # resp.jobs[0].command.python_version #=> String
7788
+ # resp.jobs[0].command.runtime #=> String
7769
7789
  # resp.jobs[0].default_arguments #=> Hash
7770
7790
  # resp.jobs[0].default_arguments["GenericString"] #=> String
7771
7791
  # resp.jobs[0].non_overridable_arguments #=> Hash
@@ -7776,7 +7796,7 @@ module Aws::Glue
7776
7796
  # resp.jobs[0].allocated_capacity #=> Integer
7777
7797
  # resp.jobs[0].timeout #=> Integer
7778
7798
  # resp.jobs[0].max_capacity #=> Float
7779
- # resp.jobs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X"
7799
+ # resp.jobs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X", "Z.2X"
7780
7800
  # resp.jobs[0].number_of_workers #=> Integer
7781
7801
  # resp.jobs[0].security_configuration #=> String
7782
7802
  # resp.jobs[0].notification_property.notify_delay_after #=> Integer
@@ -8773,7 +8793,7 @@ module Aws::Glue
8773
8793
  # resp.role #=> String
8774
8794
  # resp.glue_version #=> String
8775
8795
  # resp.max_capacity #=> Float
8776
- # resp.worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X"
8796
+ # resp.worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X", "Z.2X"
8777
8797
  # resp.number_of_workers #=> Integer
8778
8798
  # resp.timeout #=> Integer
8779
8799
  # resp.max_retries #=> Integer
@@ -8883,7 +8903,7 @@ module Aws::Glue
8883
8903
  # resp.transforms[0].role #=> String
8884
8904
  # resp.transforms[0].glue_version #=> String
8885
8905
  # resp.transforms[0].max_capacity #=> Float
8886
- # resp.transforms[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X"
8906
+ # resp.transforms[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X", "Z.2X"
8887
8907
  # resp.transforms[0].number_of_workers #=> Integer
8888
8908
  # resp.transforms[0].timeout #=> Integer
8889
8909
  # resp.transforms[0].max_retries #=> Integer
@@ -11249,7 +11269,7 @@ module Aws::Glue
11249
11269
  # resp.workflow.last_run.graph.nodes[0].job_details.job_runs[0].execution_time #=> Integer
11250
11270
  # resp.workflow.last_run.graph.nodes[0].job_details.job_runs[0].timeout #=> Integer
11251
11271
  # resp.workflow.last_run.graph.nodes[0].job_details.job_runs[0].max_capacity #=> Float
11252
- # resp.workflow.last_run.graph.nodes[0].job_details.job_runs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X"
11272
+ # resp.workflow.last_run.graph.nodes[0].job_details.job_runs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X", "Z.2X"
11253
11273
  # resp.workflow.last_run.graph.nodes[0].job_details.job_runs[0].number_of_workers #=> Integer
11254
11274
  # resp.workflow.last_run.graph.nodes[0].job_details.job_runs[0].security_configuration #=> String
11255
11275
  # resp.workflow.last_run.graph.nodes[0].job_details.job_runs[0].log_group_name #=> String
@@ -11317,7 +11337,7 @@ module Aws::Glue
11317
11337
  # resp.workflow.graph.nodes[0].job_details.job_runs[0].execution_time #=> Integer
11318
11338
  # resp.workflow.graph.nodes[0].job_details.job_runs[0].timeout #=> Integer
11319
11339
  # resp.workflow.graph.nodes[0].job_details.job_runs[0].max_capacity #=> Float
11320
- # resp.workflow.graph.nodes[0].job_details.job_runs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X"
11340
+ # resp.workflow.graph.nodes[0].job_details.job_runs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X", "Z.2X"
11321
11341
  # resp.workflow.graph.nodes[0].job_details.job_runs[0].number_of_workers #=> Integer
11322
11342
  # resp.workflow.graph.nodes[0].job_details.job_runs[0].security_configuration #=> String
11323
11343
  # resp.workflow.graph.nodes[0].job_details.job_runs[0].log_group_name #=> String
@@ -11438,7 +11458,7 @@ module Aws::Glue
11438
11458
  # resp.run.graph.nodes[0].job_details.job_runs[0].execution_time #=> Integer
11439
11459
  # resp.run.graph.nodes[0].job_details.job_runs[0].timeout #=> Integer
11440
11460
  # resp.run.graph.nodes[0].job_details.job_runs[0].max_capacity #=> Float
11441
- # resp.run.graph.nodes[0].job_details.job_runs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X"
11461
+ # resp.run.graph.nodes[0].job_details.job_runs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X", "Z.2X"
11442
11462
  # resp.run.graph.nodes[0].job_details.job_runs[0].number_of_workers #=> Integer
11443
11463
  # resp.run.graph.nodes[0].job_details.job_runs[0].security_configuration #=> String
11444
11464
  # resp.run.graph.nodes[0].job_details.job_runs[0].log_group_name #=> String
@@ -11599,7 +11619,7 @@ module Aws::Glue
11599
11619
  # resp.runs[0].graph.nodes[0].job_details.job_runs[0].execution_time #=> Integer
11600
11620
  # resp.runs[0].graph.nodes[0].job_details.job_runs[0].timeout #=> Integer
11601
11621
  # resp.runs[0].graph.nodes[0].job_details.job_runs[0].max_capacity #=> Float
11602
- # resp.runs[0].graph.nodes[0].job_details.job_runs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X"
11622
+ # resp.runs[0].graph.nodes[0].job_details.job_runs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X", "Z.2X"
11603
11623
  # resp.runs[0].graph.nodes[0].job_details.job_runs[0].number_of_workers #=> Integer
11604
11624
  # resp.runs[0].graph.nodes[0].job_details.job_runs[0].security_configuration #=> String
11605
11625
  # resp.runs[0].graph.nodes[0].job_details.job_runs[0].log_group_name #=> String
@@ -13740,7 +13760,7 @@ module Aws::Glue
  # The ID of a previous `JobRun` to retry.
  #
  # @option params [Hash<String,String>] :arguments
- # The job arguments specifically for this run. For this job run, they
+ # The job arguments associated with this run. For this job run, they
  # replace the default arguments set in the job definition itself.
  #
  # You can specify arguments here that your own job-execution script
@@ -13755,14 +13775,19 @@ module Aws::Glue
13755
13775
  # arguments, see the [Calling Glue APIs in Python][1] topic in the
13756
13776
  # developer guide.
13757
13777
  #
13758
- # For information about the key-value pairs that Glue consumes to set up
13759
- # your job, see the [Special Parameters Used by Glue][2] topic in the
13778
+ # For information about the arguments you can provide to this field when
13779
+ # configuring Spark jobs, see the [Special Parameters Used by Glue][2]
13780
+ # topic in the developer guide.
13781
+ #
13782
+ # For information about the arguments you can provide to this field when
13783
+ # configuring Ray jobs, see [Using job parameters in Ray jobs][3] in the
13760
13784
  # developer guide.
13761
13785
  #
13762
13786
  #
13763
13787
  #
13764
13788
  # [1]: https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html
13765
13789
  # [2]: https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html
13790
+ # [3]: https://docs.aws.amazon.com/glue/latest/dg/author-job-ray-job-parameters.html
13766
13791
  #
13767
13792
  # @option params [Integer] :allocated_capacity
13768
13793
  # This field is deprecated. Use `MaxCapacity` instead.
@@ -13786,24 +13811,31 @@ module Aws::Glue
13786
13811
  # jobs is 2,880 minutes (48 hours).
13787
13812
  #
13788
13813
  # @option params [Float] :max_capacity
13789
- # The number of Glue data processing units (DPUs) that can be allocated
13814
+ # For Glue version 1.0 or earlier jobs, using the standard worker type,
13815
+ # the number of Glue data processing units (DPUs) that can be allocated
13790
13816
  # when this job runs. A DPU is a relative measure of processing power
13791
13817
  # that consists of 4 vCPUs of compute capacity and 16 GB of memory. For
13792
- # more information, see the [Glue pricing page][1].
13818
+ # more information, see the [ Glue pricing page][1].
13819
+ #
13820
+ # For Glue version 2.0+ jobs, you cannot specify a `Maximum capacity`.
13821
+ # Instead, you should specify a `Worker type` and the `Number of
13822
+ # workers`.
13793
13823
  #
13794
- # Do not set `Max Capacity` if using `WorkerType` and `NumberOfWorkers`.
13824
+ # Do not set `MaxCapacity` if using `WorkerType` and `NumberOfWorkers`.
13795
13825
  #
13796
13826
  # The value that can be allocated for `MaxCapacity` depends on whether
13797
- # you are running a Python shell job, or an Apache Spark ETL job:
13827
+ # you are running a Python shell job, an Apache Spark ETL job, or an
13828
+ # Apache Spark streaming ETL job:
13798
13829
  #
13799
13830
  # * When you specify a Python shell job
13800
13831
  # (`JobCommand.Name`="pythonshell"), you can allocate either 0.0625
13801
13832
  # or 1 DPU. The default is 0.0625 DPU.
13802
13833
  #
13803
13834
  # * When you specify an Apache Spark ETL job
13804
- # (`JobCommand.Name`="glueetl"), you can allocate a minimum of 2
13805
- # DPUs. The default is 10 DPUs. This job type cannot have a fractional
13806
- # DPU allocation.
13835
+ # (`JobCommand.Name`="glueetl") or Apache Spark streaming ETL job
13836
+ # (`JobCommand.Name`="gluestreaming"), you can allocate from 2 to
13837
+ # 100 DPUs. The default is 10 DPUs. This job type cannot have a
13838
+ # fractional DPU allocation.
13807
13839
  #
13808
13840
  #
13809
13841
  #
@@ -13818,22 +13850,29 @@ module Aws::Glue
13818
13850
  #
13819
13851
  # @option params [String] :worker_type
13820
13852
  # The type of predefined worker that is allocated when a job runs.
13821
- # Accepts a value of Standard, G.1X, G.2X, or G.025X.
13853
+ # Accepts a value of Standard, G.1X, G.2X, or G.025X for Spark jobs.
13854
+ # Accepts the value Z.2X for Ray jobs.
13822
13855
  #
13823
13856
  # * For the `Standard` worker type, each worker provides 4 vCPU, 16 GB
13824
13857
  # of memory and a 50GB disk, and 2 executors per worker.
13825
13858
  #
13826
- # * For the `G.1X` worker type, each worker provides 4 vCPU, 16 GB of
13827
- # memory and a 64GB disk, and 1 executor per worker.
13859
+ # * For the `G.1X` worker type, each worker maps to 1 DPU (4 vCPU, 16 GB
13860
+ # of memory, 64 GB disk), and provides 1 executor per worker. We
13861
+ # recommend this worker type for memory-intensive jobs.
13828
13862
  #
13829
- # * For the `G.2X` worker type, each worker provides 8 vCPU, 32 GB of
13830
- # memory and a 128GB disk, and 1 executor per worker.
13863
+ # * For the `G.2X` worker type, each worker maps to 2 DPU (8 vCPU, 32 GB
13864
+ # of memory, 128 GB disk), and provides 1 executor per worker. We
13865
+ # recommend this worker type for memory-intensive jobs.
13831
13866
  #
13832
13867
  # * For the `G.025X` worker type, each worker maps to 0.25 DPU (2 vCPU,
13833
13868
  # 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We
13834
13869
  # recommend this worker type for low volume streaming jobs. This
13835
13870
  # worker type is only available for Glue version 3.0 streaming jobs.
13836
13871
  #
13872
+ # * For the `Z.2X` worker type, each worker maps to 2 DPU (8vCPU, 64 GB
13873
+ # of m emory, 128 GB disk), and provides up to 8 Ray workers (one per
13874
+ # vCPU) based on the autoscaler.
13875
+ #
13837
13876
  # @option params [Integer] :number_of_workers
13838
13877
  # The number of workers of a defined `workerType` that are allocated
13839
13878
  # when a job runs.
@@ -13869,7 +13908,7 @@ module Aws::Glue
  # notification_property: {
  # notify_delay_after: 1,
  # },
- # worker_type: "Standard", # accepts Standard, G.1X, G.2X, G.025X, G.4X, G.8X
+ # worker_type: "Standard", # accepts Standard, G.1X, G.2X, G.025X, G.4X, G.8X, Z.2X
  # number_of_workers: 1,
  # execution_class: "FLEX", # accepts FLEX, STANDARD
  # })
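Per-run overrides go through start_job_run, whose request shape above now also accepts Z.2X. A hedged sketch; the job name, argument name, and S3 path are placeholders:

    require 'aws-sdk-glue'

    glue = Aws::Glue::Client.new(region: 'us-east-1')
    resp = glue.start_job_run(
      job_name: 'my-ray-job',                               # placeholder
      worker_type: 'Z.2X',
      number_of_workers: 10,
      arguments: {
        '--output_path' => 's3://my-bucket/runs/2023-05-30/' # user-defined argument
      }
    )
    puts resp.job_run_id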
@@ -15195,7 +15234,7 @@ module Aws::Glue
  # role: "RoleString",
  # glue_version: "GlueVersionString",
  # max_capacity: 1.0,
- # worker_type: "Standard", # accepts Standard, G.1X, G.2X, G.025X, G.4X, G.8X
+ # worker_type: "Standard", # accepts Standard, G.1X, G.2X, G.025X, G.4X, G.8X, Z.2X
  # number_of_workers: 1,
  # timeout: 1,
  # max_retries: 1,
@@ -15824,7 +15863,7 @@ module Aws::Glue
  params: params,
  config: config)
  context[:gem_name] = 'aws-sdk-glue'
- context[:gem_version] = '1.138.0'
+ context[:gem_version] = '1.139.0'
  Seahorse::Client::Request.new(handlers, context)
  end

@@ -881,6 +881,7 @@ module Aws::Glue
  RunId = Shapes::StringShape.new(name: 'RunId')
  RunStatementRequest = Shapes::StructureShape.new(name: 'RunStatementRequest')
  RunStatementResponse = Shapes::StructureShape.new(name: 'RunStatementResponse')
+ RuntimeNameString = Shapes::StringShape.new(name: 'RuntimeNameString')
  S3CatalogDeltaSource = Shapes::StructureShape.new(name: 'S3CatalogDeltaSource')
  S3CatalogHudiSource = Shapes::StructureShape.new(name: 'S3CatalogHudiSource')
  S3CatalogSource = Shapes::StructureShape.new(name: 'S3CatalogSource')
@@ -3639,6 +3640,7 @@ module Aws::Glue
  JobCommand.add_member(:name, Shapes::ShapeRef.new(shape: GenericString, location_name: "Name"))
  JobCommand.add_member(:script_location, Shapes::ShapeRef.new(shape: ScriptLocationString, location_name: "ScriptLocation"))
  JobCommand.add_member(:python_version, Shapes::ShapeRef.new(shape: PythonVersionString, location_name: "PythonVersion"))
+ JobCommand.add_member(:runtime, Shapes::ShapeRef.new(shape: RuntimeNameString, location_name: "Runtime"))
  JobCommand.struct_class = Types::JobCommand

  JobList.member = Shapes::ShapeRef.new(shape: Job)
@@ -4251,7 +4251,8 @@ module Aws::Glue
4251
4251
  # @return [Types::JobCommand]
4252
4252
  #
4253
4253
  # @!attribute [rw] default_arguments
4254
- # The default arguments for this job.
4254
+ # The default arguments for every run of this job, specified as
4255
+ # name-value pairs.
4255
4256
  #
4256
4257
  # You can specify arguments here that your own job-execution script
4257
4258
  # consumes, as well as arguments that Glue itself consumes.
@@ -4265,19 +4266,24 @@ module Aws::Glue
4265
4266
  # arguments, see the [Calling Glue APIs in Python][1] topic in the
4266
4267
  # developer guide.
4267
4268
  #
4268
- # For information about the key-value pairs that Glue consumes to set
4269
- # up your job, see the [Special Parameters Used by Glue][2] topic in
4270
- # the developer guide.
4269
+ # For information about the arguments you can provide to this field
4270
+ # when configuring Spark jobs, see the [Special Parameters Used by
4271
+ # Glue][2] topic in the developer guide.
4272
+ #
4273
+ # For information about the arguments you can provide to this field
4274
+ # when configuring Ray jobs, see [Using job parameters in Ray jobs][3]
4275
+ # in the developer guide.
4271
4276
  #
4272
4277
  #
4273
4278
  #
4274
4279
  # [1]: https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html
4275
4280
  # [2]: https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html
4281
+ # [3]: https://docs.aws.amazon.com/glue/latest/dg/author-job-ray-job-parameters.html
4276
4282
  # @return [Hash<String,String>]
4277
4283
  #
4278
4284
  # @!attribute [rw] non_overridable_arguments
4279
- # Non-overridable arguments for this job, specified as name-value
4280
- # pairs.
4285
+ # Arguments for this job that are not overridden when providing job
4286
+ # arguments in a job run, specified as name-value pairs.
4281
4287
  # @return [Hash<String,String>]
4282
4288
  #
4283
4289
  # @!attribute [rw] connections
@@ -4313,13 +4319,18 @@ module Aws::Glue
4313
4319
  # type, the number of Glue data processing units (DPUs) that can be
4314
4320
  # allocated when this job runs. A DPU is a relative measure of
4315
4321
  # processing power that consists of 4 vCPUs of compute capacity and 16
4316
- # GB of memory. For more information, see the [Glue pricing page][1].
4322
+ # GB of memory. For more information, see the [ Glue pricing page][1].
4317
4323
  #
4318
- # Do not set `Max Capacity` if using `WorkerType` and
4324
+ # For Glue version 2.0+ jobs, you cannot specify a `Maximum capacity`.
4325
+ # Instead, you should specify a `Worker type` and the `Number of
4326
+ # workers`.
4327
+ #
4328
+ # Do not set `MaxCapacity` if using `WorkerType` and
4319
4329
  # `NumberOfWorkers`.
4320
4330
  #
4321
4331
  # The value that can be allocated for `MaxCapacity` depends on whether
4322
- # you are running a Python shell job or an Apache Spark ETL job:
4332
+ # you are running a Python shell job, an Apache Spark ETL job, or an
4333
+ # Apache Spark streaming ETL job:
4323
4334
  #
4324
4335
  # * When you specify a Python shell job
4325
4336
  # (`JobCommand.Name`="pythonshell"), you can allocate either
@@ -4327,14 +4338,10 @@ module Aws::Glue
4327
4338
  #
4328
4339
  # * When you specify an Apache Spark ETL job
4329
4340
  # (`JobCommand.Name`="glueetl") or Apache Spark streaming ETL job
4330
- # (`JobCommand.Name`="gluestreaming"), you can allocate a minimum
4331
- # of 2 DPUs. The default is 10 DPUs. This job type cannot have a
4341
+ # (`JobCommand.Name`="gluestreaming"), you can allocate from 2 to
4342
+ # 100 DPUs. The default is 10 DPUs. This job type cannot have a
4332
4343
  # fractional DPU allocation.
4333
4344
  #
4334
- # For Glue version 2.0 jobs, you cannot instead specify a `Maximum
4335
- # capacity`. Instead, you should specify a `Worker type` and the
4336
- # `Number of workers`.
4337
- #
4338
4345
  #
4339
4346
  #
4340
4347
  # [1]: https://aws.amazon.com/glue/pricing/
@@ -4360,9 +4367,14 @@ module Aws::Glue
4360
4367
  # @return [Types::NotificationProperty]
4361
4368
  #
4362
4369
  # @!attribute [rw] glue_version
4363
- # Glue version determines the versions of Apache Spark and Python that
4364
- # Glue supports. The Python version indicates the version supported
4365
- # for jobs of type Spark.
4370
+ # In Spark jobs, `GlueVersion` determines the versions of Apache Spark
4371
+ # and Python that Glue available in a job. The Python version
4372
+ # indicates the version supported for jobs of type Spark.
4373
+ #
4374
+ # Ray jobs should set `GlueVersion` to `4.0` or greater. However, the
4375
+ # versions of Ray, Python and additional libraries available in your
4376
+ # Ray job are determined by the `Runtime` parameter of the Job
4377
+ # command.
4366
4378
  #
4367
4379
  # For more information about the available Glue versions and
4368
4380
  # corresponding Spark and Python versions, see [Glue version][1] in
@@ -4383,7 +4395,8 @@ module Aws::Glue
4383
4395
  #
4384
4396
  # @!attribute [rw] worker_type
4385
4397
  # The type of predefined worker that is allocated when a job runs.
4386
- # Accepts a value of Standard, G.1X, G.2X, or G.025X.
4398
+ # Accepts a value of Standard, G.1X, G.2X, or G.025X for Spark jobs.
4399
+ # Accepts the value Z.2X for Ray jobs.
4387
4400
  #
4388
4401
  # * For the `Standard` worker type, each worker provides 4 vCPU, 16 GB
4389
4402
  # of memory and a 50GB disk, and 2 executors per worker.
@@ -4401,6 +4414,10 @@ module Aws::Glue
4401
4414
  # worker. We recommend this worker type for low volume streaming
4402
4415
  # jobs. This worker type is only available for Glue version 3.0
4403
4416
  # streaming jobs.
4417
+ #
4418
+ # * For the `Z.2X` worker type, each worker maps to 2 M-DPU (8vCPU, 64
4419
+ # GB of m emory, 128 GB disk), and provides up to 8 Ray workers
4420
+ # based on the autoscaler.
4404
4421
  # @return [String]
4405
4422
  #
4406
4423
  # @!attribute [rw] code_gen_configuration_nodes
@@ -12165,28 +12182,39 @@ module Aws::Glue
12165
12182
  # @return [Types::JobCommand]
12166
12183
  #
12167
12184
  # @!attribute [rw] default_arguments
12168
- # The default arguments for this job, specified as name-value pairs.
12185
+ # The default arguments for every run of this job, specified as
12186
+ # name-value pairs.
12169
12187
  #
12170
12188
  # You can specify arguments here that your own job-execution script
12171
12189
  # consumes, as well as arguments that Glue itself consumes.
12172
12190
  #
12191
+ # Job arguments may be logged. Do not pass plaintext secrets as
12192
+ # arguments. Retrieve secrets from a Glue Connection, Secrets Manager
12193
+ # or other secret management mechanism if you intend to keep them
12194
+ # within the Job.
12195
+ #
12173
12196
  # For information about how to specify and consume your own Job
12174
12197
  # arguments, see the [Calling Glue APIs in Python][1] topic in the
12175
12198
  # developer guide.
12176
12199
  #
12177
- # For information about the key-value pairs that Glue consumes to set
12178
- # up your job, see the [Special Parameters Used by Glue][2] topic in
12179
- # the developer guide.
12200
+ # For information about the arguments you can provide to this field
12201
+ # when configuring Spark jobs, see the [Special Parameters Used by
12202
+ # Glue][2] topic in the developer guide.
12203
+ #
12204
+ # For information about the arguments you can provide to this field
12205
+ # when configuring Ray jobs, see [Using job parameters in Ray jobs][3]
12206
+ # in the developer guide.
12180
12207
  #
12181
12208
  #
12182
12209
  #
12183
12210
  # [1]: https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html
12184
12211
  # [2]: https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html
12212
+ # [3]: https://docs.aws.amazon.com/glue/latest/dg/author-job-ray-job-parameters.html
12185
12213
  # @return [Hash<String,String>]
12186
12214
  #
12187
12215
  # @!attribute [rw] non_overridable_arguments
12188
- # Non-overridable arguments for this job, specified as name-value
12189
- # pairs.
12216
+ # Arguments for this job that are not overridden when providing job
12217
+ # arguments in a job run, specified as name-value pairs.
12190
12218
  # @return [Hash<String,String>]
12191
12219
  #
12192
12220
  # @!attribute [rw] connections
@@ -12224,7 +12252,7 @@ module Aws::Glue
12224
12252
  # type, the number of Glue data processing units (DPUs) that can be
12225
12253
  # allocated when this job runs. A DPU is a relative measure of
12226
12254
  # processing power that consists of 4 vCPUs of compute capacity and 16
12227
- # GB of memory. For more information, see the [Glue pricing page][1].
12255
+ # GB of memory. For more information, see the [ Glue pricing page][1].
12228
12256
  #
12229
12257
  # For Glue version 2.0 or later jobs, you cannot specify a `Maximum
12230
12258
  # capacity`. Instead, you should specify a `Worker type` and the
@@ -12254,7 +12282,8 @@ module Aws::Glue
12254
12282
  #
12255
12283
  # @!attribute [rw] worker_type
12256
12284
  # The type of predefined worker that is allocated when a job runs.
12257
- # Accepts a value of Standard, G.1X, G.2X, or G.025X.
12285
+ # Accepts a value of Standard, G.1X, G.2X, G.4X, G.8X, or G.025X for
12286
+ # Spark jobs. Accepts the value Z.2X for Ray jobs.
12258
12287
  #
12259
12288
  # * For the `Standard` worker type, each worker provides 4 vCPU, 16 GB
12260
12289
  # of memory and a 50GB disk, and 2 executors per worker.
@@ -12275,20 +12304,30 @@ module Aws::Glue
12275
12304
  # GB of memory, 256 GB disk), and provides 1 executor per worker. We
12276
12305
  # recommend this worker type for jobs whose workloads contain your
12277
12306
  # most demanding transforms, aggregations, joins, and queries. This
12278
- # worker type is available only for Glue version 3.0 or later jobs.
12307
+ # worker type is available only for Glue version 3.0 or later Spark
12308
+ # ETL jobs in the following Amazon Web Services Regions: US East
12309
+ # (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific
12310
+ # (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada
12311
+ # (Central), Europe (Frankfurt), Europe (Ireland), and Europe
12312
+ # (Stockholm).
12279
12313
  #
12280
12314
  # * For the `G.8X` worker type, each worker maps to 8 DPU (32 vCPU,
12281
12315
  # 128 GB of memory, 512 GB disk), and provides 1 executor per
12282
12316
  # worker. We recommend this worker type for jobs whose workloads
12283
12317
  # contain your most demanding transforms, aggregations, joins, and
12284
12318
  # queries. This worker type is available only for Glue version 3.0
12285
- # or later jobs.
12319
+ # or later Spark ETL jobs, in the same Amazon Web Services Regions
12320
+ # as supported for the `G.4X` worker type.
12286
12321
  #
12287
12322
  # * For the `G.025X` worker type, each worker maps to 0.25 DPU (2
12288
12323
  # vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per
12289
12324
  # worker. We recommend this worker type for low volume streaming
12290
12325
  # jobs. This worker type is only available for Glue version 3.0
12291
12326
  # streaming jobs.
12327
+ #
12328
+ # * For the `Z.2X` worker type, each worker maps to 2 M-DPU (8vCPU, 64
12329
+ # GB of m emory, 128 GB disk), and provides a default of 8 Ray
12330
+ # workers (1 per vCPU).
12292
12331
  # @return [String]
12293
12332
  #
12294
12333
  # @!attribute [rw] number_of_workers
@@ -12306,9 +12345,14 @@ module Aws::Glue
12306
12345
  # @return [Types::NotificationProperty]
12307
12346
  #
12308
12347
  # @!attribute [rw] glue_version
12309
- # Glue version determines the versions of Apache Spark and Python that
12310
- # Glue supports. The Python version indicates the version supported
12311
- # for jobs of type Spark.
12348
+ # In Spark jobs, `GlueVersion` determines the versions of Apache Spark
12349
+ # and Python that Glue available in a job. The Python version
12350
+ # indicates the version supported for jobs of type Spark.
12351
+ #
12352
+ # Ray jobs should set `GlueVersion` to `4.0` or greater. However, the
12353
+ # versions of Ray, Python and additional libraries available in your
12354
+ # Ray job are determined by the `Runtime` parameter of the Job
12355
+ # command.
12312
12356
  #
12313
12357
  # For more information about the available Glue versions and
12314
12358
  # corresponding Spark and Python versions, see [Glue version][1] in
@@ -12446,7 +12490,8 @@ module Aws::Glue
  # @!attribute [rw] name
  # The name of the job command. For an Apache Spark ETL job, this must
  # be `glueetl`. For a Python shell job, it must be `pythonshell`. For
- # an Apache Spark streaming ETL job, this must be `gluestreaming`.
+ # an Apache Spark streaming ETL job, this must be `gluestreaming`. For
+ # a Ray job, this must be `glueray`.
  # @return [String]
  #
  # @!attribute [rw] script_location
@@ -12459,12 +12504,24 @@ module Aws::Glue
  # values are 2 or 3.
  # @return [String]
  #
+ # @!attribute [rw] runtime
+ # In Ray jobs, Runtime is used to specify the versions of Ray, Python
+ # and additional libraries available in your environment. This field
+ # is not used in other job types. For supported runtime environment
+ # values, see [Working with Ray jobs][1] in the Glue Developer Guide.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/glue/latest/dg/author-job-ray-runtimes.html
+ # @return [String]
+ #
  # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/JobCommand AWS API Documentation
  #
  class JobCommand < Struct.new(
  :name,
  :script_location,
- :python_version)
+ :python_version,
+ :runtime)
  SENSITIVE = []
  include Aws::Structure
  end
@@ -12535,18 +12592,28 @@ module Aws::Glue
12535
12592
  # You can specify arguments here that your own job-execution script
12536
12593
  # consumes, as well as arguments that Glue itself consumes.
12537
12594
  #
12538
- # For information about how to specify and consume your own job
12595
+ # Job arguments may be logged. Do not pass plaintext secrets as
12596
+ # arguments. Retrieve secrets from a Glue Connection, Secrets Manager
12597
+ # or other secret management mechanism if you intend to keep them
12598
+ # within the Job.
12599
+ #
12600
+ # For information about how to specify and consume your own Job
12539
12601
  # arguments, see the [Calling Glue APIs in Python][1] topic in the
12540
12602
  # developer guide.
12541
12603
  #
12542
- # For information about the key-value pairs that Glue consumes to set
12543
- # up your job, see the [Special Parameters Used by Glue][2] topic in
12544
- # the developer guide.
12604
+ # For information about the arguments you can provide to this field
12605
+ # when configuring Spark jobs, see the [Special Parameters Used by
12606
+ # Glue][2] topic in the developer guide.
12607
+ #
12608
+ # For information about the arguments you can provide to this field
12609
+ # when configuring Ray jobs, see [Using job parameters in Ray jobs][3]
12610
+ # in the developer guide.
12545
12611
  #
12546
12612
  #
12547
12613
  #
12548
12614
  # [1]: https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html
12549
12615
  # [2]: https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html
12616
+ # [3]: https://docs.aws.amazon.com/glue/latest/dg/author-job-ray-job-parameters.html
12550
12617
  # @return [Hash<String,String>]
12551
12618
  #
12552
12619
  # @!attribute [rw] error_message
@@ -12586,24 +12653,31 @@ module Aws::Glue
12586
12653
  # @return [Integer]
12587
12654
  #
12588
12655
  # @!attribute [rw] max_capacity
12589
- # The number of Glue data processing units (DPUs) that can be
12656
+ # For Glue version 1.0 or earlier jobs, using the standard worker
12657
+ # type, the number of Glue data processing units (DPUs) that can be
12590
12658
  # allocated when this job runs. A DPU is a relative measure of
12591
12659
  # processing power that consists of 4 vCPUs of compute capacity and 16
12592
- # GB of memory. For more information, see the [Glue pricing page][1].
12660
+ # GB of memory. For more information, see the [ Glue pricing page][1].
12661
+ #
12662
+ # For Glue version 2.0+ jobs, you cannot specify a `Maximum capacity`.
12663
+ # Instead, you should specify a `Worker type` and the `Number of
12664
+ # workers`.
12593
12665
  #
12594
- # Do not set `Max Capacity` if using `WorkerType` and
12666
+ # Do not set `MaxCapacity` if using `WorkerType` and
12595
12667
  # `NumberOfWorkers`.
12596
12668
  #
12597
12669
  # The value that can be allocated for `MaxCapacity` depends on whether
12598
- # you are running a Python shell job or an Apache Spark ETL job:
12670
+ # you are running a Python shell job, an Apache Spark ETL job, or an
12671
+ # Apache Spark streaming ETL job:
12599
12672
  #
12600
12673
  # * When you specify a Python shell job
12601
12674
  # (`JobCommand.Name`="pythonshell"), you can allocate either
12602
12675
  # 0.0625 or 1 DPU. The default is 0.0625 DPU.
12603
12676
  #
12604
12677
  # * When you specify an Apache Spark ETL job
12605
- # (`JobCommand.Name`="glueetl"), you can allocate a minimum of 2
12606
- # DPUs. The default is 10 DPUs. This job type cannot have a
12678
+ # (`JobCommand.Name`="glueetl") or Apache Spark streaming ETL job
12679
+ # (`JobCommand.Name`="gluestreaming"), you can allocate from 2 to
12680
+ # 100 DPUs. The default is 10 DPUs. This job type cannot have a
12607
12681
  # fractional DPU allocation.
12608
12682
  #
12609
12683
  #
@@ -12613,22 +12687,29 @@ module Aws::Glue
12613
12687
  #
12614
12688
  # @!attribute [rw] worker_type
12615
12689
  # The type of predefined worker that is allocated when a job runs.
12616
- # Accepts a value of Standard, G.1X, G.2X, or G.025X.
12690
+ # Accepts a value of Standard, G.1X, G.2X, or G.025X for Spark jobs.
12691
+ # Accepts the value Z.2X for Ray jobs.
12617
12692
  #
12618
12693
  # * For the `Standard` worker type, each worker provides 4 vCPU, 16 GB
12619
12694
  # of memory and a 50GB disk, and 2 executors per worker.
12620
12695
  #
12621
- # * For the `G.1X` worker type, each worker provides 4 vCPU, 16 GB of
12622
- # memory and a 64GB disk, and 1 executor per worker.
12696
+ # * For the `G.1X` worker type, each worker maps to 1 DPU (4 vCPU, 16
12697
+ # GB of memory, 64 GB disk), and provides 1 executor per worker. We
12698
+ # recommend this worker type for memory-intensive jobs.
12623
12699
  #
12624
- # * For the `G.2X` worker type, each worker provides 8 vCPU, 32 GB of
12625
- # memory and a 128GB disk, and 1 executor per worker.
12700
+ # * For the `G.2X` worker type, each worker maps to 2 DPU (8 vCPU, 32
12701
+ # GB of memory, 128 GB disk), and provides 1 executor per worker. We
12702
+ # recommend this worker type for memory-intensive jobs.
12626
12703
  #
12627
12704
  # * For the `G.025X` worker type, each worker maps to 0.25 DPU (2
12628
12705
  # vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per
12629
12706
  # worker. We recommend this worker type for low volume streaming
12630
12707
  # jobs. This worker type is only available for Glue version 3.0
12631
12708
  # streaming jobs.
12709
+ #
12710
+ # * For the `Z.2X` worker type, each worker maps to 2 M-DPU (8vCPU, 64
12711
+ # GB of m emory, 128 GB disk), and provides up to 8 Ray workers (one
12712
+ # per vCPU) based on the autoscaler.
12632
12713
  # @return [String]
12633
12714
  #
12634
12715
  # @!attribute [rw] number_of_workers
@@ -12656,9 +12737,14 @@ module Aws::Glue
12656
12737
  # @return [Types::NotificationProperty]
12657
12738
  #
12658
12739
  # @!attribute [rw] glue_version
12659
- # Glue version determines the versions of Apache Spark and Python that
12660
- # Glue supports. The Python version indicates the version supported
12661
- # for jobs of type Spark.
12740
+ # In Spark jobs, `GlueVersion` determines the versions of Apache Spark
12741
+ # and Python that Glue available in a job. The Python version
12742
+ # indicates the version supported for jobs of type Spark.
12743
+ #
12744
+ # Ray jobs should set `GlueVersion` to `4.0` or greater. However, the
12745
+ # versions of Ray, Python and additional libraries available in your
12746
+ # Ray job are determined by the `Runtime` parameter of the Job
12747
+ # command.
12662
12748
  #
12663
12749
  # For more information about the available Glue versions and
12664
12750
  # corresponding Spark and Python versions, see [Glue version][1] in
@@ -12755,28 +12841,39 @@ module Aws::Glue
12755
12841
  # @return [Types::JobCommand]
12756
12842
  #
12757
12843
  # @!attribute [rw] default_arguments
12758
- # The default arguments for this job.
12844
+ # The default arguments for every run of this job, specified as
12845
+ # name-value pairs.
12759
12846
  #
12760
12847
  # You can specify arguments here that your own job-execution script
12761
12848
  # consumes, as well as arguments that Glue itself consumes.
12762
12849
  #
12850
+ # Job arguments may be logged. Do not pass plaintext secrets as
12851
+ # arguments. Retrieve secrets from a Glue Connection, Secrets Manager
12852
+ # or other secret management mechanism if you intend to keep them
12853
+ # within the Job.
12854
+ #
12763
12855
  # For information about how to specify and consume your own Job
12764
12856
  # arguments, see the [Calling Glue APIs in Python][1] topic in the
12765
12857
  # developer guide.
12766
12858
  #
12767
- # For information about the key-value pairs that Glue consumes to set
12768
- # up your job, see the [Special Parameters Used by Glue][2] topic in
12769
- # the developer guide.
12859
+ # For information about the arguments you can provide to this field
12860
+ # when configuring Spark jobs, see the [Special Parameters Used by
12861
+ # Glue][2] topic in the developer guide.
12862
+ #
12863
+ # For information about the arguments you can provide to this field
12864
+ # when configuring Ray jobs, see [Using job parameters in Ray jobs][3]
12865
+ # in the developer guide.
12770
12866
  #
12771
12867
  #
12772
12868
  #
12773
12869
  # [1]: https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html
12774
12870
  # [2]: https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html
12871
+ # [3]: https://docs.aws.amazon.com/glue/latest/dg/author-job-ray-job-parameters.html
12775
12872
  # @return [Hash<String,String>]
12776
12873
  #
12777
12874
  # @!attribute [rw] non_overridable_arguments
12778
- # Non-overridable arguments for this job, specified as name-value
12779
- # pairs.
12875
+ # Arguments for this job that are not overridden when providing job
12876
+ # arguments in a job run, specified as name-value pairs.
12780
12877
  # @return [Hash<String,String>]
12781
12878
  #
12782
12879
  # @!attribute [rw] connections
@@ -12812,13 +12909,18 @@ module Aws::Glue
12812
12909
  # type, the number of Glue data processing units (DPUs) that can be
12813
12910
  # allocated when this job runs. A DPU is a relative measure of
12814
12911
  # processing power that consists of 4 vCPUs of compute capacity and 16
12815
- # GB of memory. For more information, see the [Glue pricing page][1].
12912
+ # GB of memory. For more information, see the [ Glue pricing page][1].
12913
+ #
12914
+ # For Glue version 2.0+ jobs, you cannot specify a `Maximum capacity`.
12915
+ # Instead, you should specify a `Worker type` and the `Number of
12916
+ # workers`.
12816
12917
  #
12817
- # Do not set `Max Capacity` if using `WorkerType` and
12918
+ # Do not set `MaxCapacity` if using `WorkerType` and
12818
12919
  # `NumberOfWorkers`.
12819
12920
  #
12820
12921
  # The value that can be allocated for `MaxCapacity` depends on whether
12821
- # you are running a Python shell job or an Apache Spark ETL job:
12922
+ # you are running a Python shell job, an Apache Spark ETL job, or an
12923
+ # Apache Spark streaming ETL job:
12822
12924
  #
12823
12925
  # * When you specify a Python shell job
12824
12926
  # (`JobCommand.Name`="pythonshell"), you can allocate either
@@ -12826,14 +12928,10 @@ module Aws::Glue
12826
12928
  #
12827
12929
  # * When you specify an Apache Spark ETL job
12828
12930
  # (`JobCommand.Name`="glueetl") or Apache Spark streaming ETL job
12829
- # (`JobCommand.Name`="gluestreaming"), you can allocate a minimum
12830
- # of 2 DPUs. The default is 10 DPUs. This job type cannot have a
12931
+ # (`JobCommand.Name`="gluestreaming"), you can allocate from 2 to
12932
+ # 100 DPUs. The default is 10 DPUs. This job type cannot have a
12831
12933
  # fractional DPU allocation.
12832
12934
  #
12833
- # For Glue version 2.0 jobs, you cannot instead specify a `Maximum
12834
- # capacity`. Instead, you should specify a `Worker type` and the
12835
- # `Number of workers`.
12836
- #
12837
12935
  #
12838
12936
  #
12839
12937
  # [1]: https://aws.amazon.com/glue/pricing/
@@ -12841,7 +12939,8 @@ module Aws::Glue
12841
12939
  #
12842
12940
  # @!attribute [rw] worker_type
12843
12941
  # The type of predefined worker that is allocated when a job runs.
12844
- # Accepts a value of Standard, G.1X, G.2X, or G.025X.
12942
+ # Accepts a value of Standard, G.1X, G.2X, or G.025X for Spark jobs.
12943
+ # Accepts the value Z.2X for Ray jobs.
12845
12944
  #
12846
12945
  # * For the `Standard` worker type, each worker provides 4 vCPU, 16 GB
12847
12946
  # of memory and a 50GB disk, and 2 executors per worker.
@@ -12859,6 +12958,10 @@ module Aws::Glue
12859
12958
  # worker. We recommend this worker type for low volume streaming
12860
12959
  # jobs. This worker type is only available for Glue version 3.0
12861
12960
  # streaming jobs.
12961
+ #
12962
+ # * For the `Z.2X` worker type, each worker maps to 2 M-DPU (8 vCPU, 64
12963
+ # GB of memory, 128 GB disk), and provides up to 8 Ray workers
12964
+ # based on the autoscaler.
12862
12965
  # @return [String]
12863
12966
  #
12864
12967
  # @!attribute [rw] number_of_workers
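As a rough illustration of the capacity rules above, the first sketch sizes a Python shell job with max_capacity alone, while the second sizes a Glue 4.0 Spark job with worker_type and number_of_workers and leaves max_capacity unset. Names, ARNs, and script paths are placeholders.

  require 'aws-sdk-glue'
  glue = Aws::Glue::Client.new(region: 'us-east-1')

  # Python shell job: fractional DPU allocation through max_capacity only.
  glue.create_job(
    name: 'example-pythonshell-job',
    role: 'arn:aws:iam::123456789012:role/ExampleGlueJobRole',
    command: { name: 'pythonshell', script_location: 's3://example-bucket/scripts/shell.py', python_version: '3.9' },
    max_capacity: 0.0625
  )

  # Spark ETL job on Glue 2.0+: size with worker_type/number_of_workers instead of max_capacity.
  glue.create_job(
    name: 'example-spark-job',
    role: 'arn:aws:iam::123456789012:role/ExampleGlueJobRole',
    command: { name: 'glueetl', script_location: 's3://example-bucket/scripts/etl.py' },
    glue_version: '4.0',
    worker_type: 'G.2X',
    number_of_workers: 10
  )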
@@ -12876,14 +12979,22 @@ module Aws::Glue
12876
12979
  # @return [Types::NotificationProperty]
12877
12980
  #
12878
12981
  # @!attribute [rw] glue_version
12879
- # Glue version determines the versions of Apache Spark and Python that
12880
- # Glue supports. The Python version indicates the version supported
12881
- # for jobs of type Spark.
12982
+ # In Spark jobs, `GlueVersion` determines the versions of Apache Spark
12983
+ # and Python that Glue makes available in a job. The Python version
12984
+ # indicates the version supported for jobs of type Spark.
12985
+ #
12986
+ # Ray jobs should set `GlueVersion` to `4.0` or greater. However, the
12987
+ # versions of Ray, Python and additional libraries available in your
12988
+ # Ray job are determined by the `Runtime` parameter of the Job
12989
+ # command.
12882
12990
  #
12883
12991
  # For more information about the available Glue versions and
12884
12992
  # corresponding Spark and Python versions, see [Glue version][1] in
12885
12993
  # the developer guide.
12886
12994
  #
12995
+ # Jobs that are created without specifying a Glue version default to
12996
+ # Glue 0.9.
12997
+ #
12887
12998
  #
12888
12999
  #
12889
13000
  # [1]: https://docs.aws.amazon.com/glue/latest/dg/add-job.html
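Combining the GlueVersion note with the Runtime parameter introduced in this release, a Ray job definition might look like the sketch below. The command name 'glueray', the runtime string 'Ray2.4', the Python version, and all resource names are assumptions for illustration; consult the Glue developer guide for the runtimes actually available in your Region.

  require 'aws-sdk-glue'
  glue = Aws::Glue::Client.new(region: 'us-east-1')

  glue.create_job(
    name: 'example-ray-job',
    role: 'arn:aws:iam::123456789012:role/ExampleGlueJobRole',
    command: {
      name: 'glueray',                                        # assumed command name for Ray jobs
      script_location: 's3://example-bucket/scripts/ray_job.py',
      python_version: '3.9',
      runtime: 'Ray2.4'                                       # the Runtime parameter added in 1.139.0
    },
    glue_version: '4.0',                                      # Ray jobs require 4.0 or greater
    worker_type: 'Z.2X',
    number_of_workers: 5
  )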
@@ -18340,7 +18451,7 @@ module Aws::Glue
18340
18451
  # @return [String]
18341
18452
  #
18342
18453
  # @!attribute [rw] arguments
18343
- # The job arguments specifically for this run. For this job run, they
18454
+ # The job arguments associated with this run. For this job run, they
18344
18455
  # replace the default arguments set in the job definition itself.
18345
18456
  #
18346
18457
  # You can specify arguments here that your own job-execution script
@@ -18355,14 +18466,19 @@ module Aws::Glue
18355
18466
  # arguments, see the [Calling Glue APIs in Python][1] topic in the
18356
18467
  # developer guide.
18357
18468
  #
18358
- # For information about the key-value pairs that Glue consumes to set
18359
- # up your job, see the [Special Parameters Used by Glue][2] topic in
18360
- # the developer guide.
18469
+ # For information about the arguments you can provide to this field
18470
+ # when configuring Spark jobs, see the [Special Parameters Used by
18471
+ # Glue][2] topic in the developer guide.
18472
+ #
18473
+ # For information about the arguments you can provide to this field
18474
+ # when configuring Ray jobs, see [Using job parameters in Ray jobs][3]
18475
+ # in the developer guide.
18361
18476
  #
18362
18477
  #
18363
18478
  #
18364
18479
  # [1]: https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html
18365
18480
  # [2]: https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html
18481
+ # [3]: https://docs.aws.amazon.com/glue/latest/dg/author-job-ray-job-parameters.html
18366
18482
  # @return [Hash<String,String>]
18367
18483
  #
18368
18484
  # @!attribute [rw] allocated_capacity
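A short sketch of per-run overrides, assuming a Spark job named 'example-spark-job' already exists: the hash passed to start_job_run replaces the job's default arguments for that run only. The keys shown are common Spark special parameters; a Ray job would instead take the keys described in the Ray job parameters guide linked above.

  require 'aws-sdk-glue'
  glue = Aws::Glue::Client.new(region: 'us-east-1')

  resp = glue.start_job_run(
    job_name: 'example-spark-job',
    arguments: {
      '--TempDir'        => 's3://example-bucket/tmp/override/',
      '--enable-metrics' => 'true'
    }
  )
  puts resp.job_run_id   # identifier for this specific run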
@@ -18390,24 +18506,31 @@ module Aws::Glue
18390
18506
  # @return [Integer]
18391
18507
  #
18392
18508
  # @!attribute [rw] max_capacity
18393
- # The number of Glue data processing units (DPUs) that can be
18509
+ # For Glue version 1.0 or earlier jobs, using the standard worker
18510
+ # type, the number of Glue data processing units (DPUs) that can be
18394
18511
  # allocated when this job runs. A DPU is a relative measure of
18395
18512
  # processing power that consists of 4 vCPUs of compute capacity and 16
18396
- # GB of memory. For more information, see the [Glue pricing page][1].
18513
+ # GB of memory. For more information, see the [Glue pricing page][1].
18397
18514
  #
18398
- # Do not set `Max Capacity` if using `WorkerType` and
18515
+ # For Glue version 2.0+ jobs, you cannot specify a `Maximum capacity`.
18516
+ # Instead, you should specify a `Worker type` and the `Number of
18517
+ # workers`.
18518
+ #
18519
+ # Do not set `MaxCapacity` if using `WorkerType` and
18399
18520
  # `NumberOfWorkers`.
18400
18521
  #
18401
18522
  # The value that can be allocated for `MaxCapacity` depends on whether
18402
- # you are running a Python shell job, or an Apache Spark ETL job:
18523
+ # you are running a Python shell job, an Apache Spark ETL job, or an
18524
+ # Apache Spark streaming ETL job:
18403
18525
  #
18404
18526
  # * When you specify a Python shell job
18405
18527
  # (`JobCommand.Name`="pythonshell"), you can allocate either
18406
18528
  # 0.0625 or 1 DPU. The default is 0.0625 DPU.
18407
18529
  #
18408
18530
  # * When you specify an Apache Spark ETL job
18409
- # (`JobCommand.Name`="glueetl"), you can allocate a minimum of 2
18410
- # DPUs. The default is 10 DPUs. This job type cannot have a
18531
+ # (`JobCommand.Name`="glueetl") or Apache Spark streaming ETL job
18532
+ # (`JobCommand.Name`="gluestreaming"), you can allocate from 2 to
18533
+ # 100 DPUs. The default is 10 DPUs. This job type cannot have a
18411
18534
  # fractional DPU allocation.
18412
18535
  #
18413
18536
  #
@@ -18426,22 +18549,29 @@ module Aws::Glue
18426
18549
  #
18427
18550
  # @!attribute [rw] worker_type
18428
18551
  # The type of predefined worker that is allocated when a job runs.
18429
- # Accepts a value of Standard, G.1X, G.2X, or G.025X.
18552
+ # Accepts a value of Standard, G.1X, G.2X, or G.025X for Spark jobs.
18553
+ # Accepts the value Z.2X for Ray jobs.
18430
18554
  #
18431
18555
  # * For the `Standard` worker type, each worker provides 4 vCPU, 16 GB
18432
18556
  # of memory and a 50GB disk, and 2 executors per worker.
18433
18557
  #
18434
- # * For the `G.1X` worker type, each worker provides 4 vCPU, 16 GB of
18435
- # memory and a 64GB disk, and 1 executor per worker.
18558
+ # * For the `G.1X` worker type, each worker maps to 1 DPU (4 vCPU, 16
18559
+ # GB of memory, 64 GB disk), and provides 1 executor per worker. We
18560
+ # recommend this worker type for memory-intensive jobs.
18436
18561
  #
18437
- # * For the `G.2X` worker type, each worker provides 8 vCPU, 32 GB of
18438
- # memory and a 128GB disk, and 1 executor per worker.
18562
+ # * For the `G.2X` worker type, each worker maps to 2 DPU (8 vCPU, 32
18563
+ # GB of memory, 128 GB disk), and provides 1 executor per worker. We
18564
+ # recommend this worker type for memory-intensive jobs.
18439
18565
  #
18440
18566
  # * For the `G.025X` worker type, each worker maps to 0.25 DPU (2
18441
18567
  # vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per
18442
18568
  # worker. We recommend this worker type for low volume streaming
18443
18569
  # jobs. This worker type is only available for Glue version 3.0
18444
18570
  # streaming jobs.
18571
+ #
18572
+ # * For the `Z.2X` worker type, each worker maps to 2 DPU (8 vCPU, 64
18573
+ # GB of memory, 128 GB disk), and provides up to 8 Ray workers (one
18574
+ # per vCPU) based on the autoscaler.
18445
18575
  # @return [String]
18446
18576
  #
18447
18577
  # @!attribute [rw] number_of_workers
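To see where the new worker type surfaces in the response shapes touched by this diff, a finished run can be read back with get_job_run; the job name and run id below are placeholders, and the '#=>' comments show plausible values rather than guaranteed output.

  require 'aws-sdk-glue'
  glue = Aws::Glue::Client.new(region: 'us-east-1')

  resp = glue.get_job_run(job_name: 'example-ray-job', run_id: 'jr_0123456789abcdef')
  resp.job_run.worker_type        #=> e.g. "Z.2X"
  resp.job_run.glue_version       #=> e.g. "4.0"
  resp.job_run.number_of_workers  #=> e.g. 5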
data/lib/aws-sdk-glue.rb CHANGED
@@ -52,6 +52,6 @@ require_relative 'aws-sdk-glue/customizations'
52
52
  # @!group service
53
53
  module Aws::Glue
54
54
 
55
- GEM_VERSION = '1.138.0'
55
+ GEM_VERSION = '1.139.0'
56
56
 
57
57
  end
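To pick up the Runtime parameter and the Z.2X worker type in an application, the dependency can be pinned at or above this release; a minimal Gemfile entry, assuming Bundler is used:

  # Gemfile
  gem 'aws-sdk-glue', '>= 1.139.0'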
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: aws-sdk-glue
3
3
  version: !ruby/object:Gem::Version
4
- version: 1.138.0
4
+ version: 1.139.0
5
5
  platform: ruby
6
6
  authors:
7
7
  - Amazon Web Services
8
8
  autorequire:
9
9
  bindir: bin
10
10
  cert_chain: []
11
- date: 2023-05-25 00:00:00.000000000 Z
11
+ date: 2023-05-30 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: aws-sdk-core