aws-sdk-glue 1.137.0 → 1.139.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: a5c0c69fee6c9e04e563848f0f4fbc2703e28acb2736305bb7ce4e3a7e3b8793
- data.tar.gz: 49c3bcb7156e5d7104babfc011bc32d8d11237cbdfbe0c0e652c6903262e43a2
+ metadata.gz: 1bd041bc46f2d935400f92415f17c4c06c164ad094d739eb69883e13e4b9c7c5
+ data.tar.gz: 192981a8963452e7aa09501ecf77b0df8f271411440d4579a949ad4321fb50a9
  SHA512:
- metadata.gz: a7587242c2b02934cfcb4f31a92ab274cd792f882cf3f9e5a6fa2c78cf90acc3a0a06a794418d0de43fd0e0b5451fbcb7e647be57f3f25a4042c745964326dbf
- data.tar.gz: 996f7227ce8a9b720e66f4d96dcb519dfd9ebc2a0434ee767b4b52f00c8257e8e44357280c4b428c89c61cd6831e37cb75a85880a60f1eff06cc3f371d34e1b4
+ metadata.gz: 99877713a3c5d842165d26438011951056941ad9472f02d082fb674df3bff08e131ad20e7ef463cfe945e90d52bc5aa658cc3d0af8e8bd0924c7aaf158c965dd
+ data.tar.gz: d8a5b642a7049875f2160be7892edb0a0379e3c18cdc0257c24536ee67bcd907d6f09adb99885a2aa1623bef897723b5aa4be731f6be7479a1c3b5192972cafc
data/CHANGELOG.md CHANGED
@@ -1,6 +1,16 @@
  Unreleased Changes
  ------------------

+ 1.139.0 (2023-05-30)
+ ------------------
+
+ * Feature - Added Runtime parameter to allow selection of Ray Runtime
+
+ 1.138.0 (2023-05-25)
+ ------------------
+
+ * Feature - Added ability to create data quality rulesets for shared, cross-account Glue Data Catalog tables. Added support for dataset comparison rules through a new parameter called AdditionalDataSources. Enhanced the data quality results with a map containing profiled metric values.
+
  1.137.0 (2023-05-16)
  ------------------

data/VERSION CHANGED
@@ -1 +1 @@
- 1.137.0
+ 1.139.0
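The headline change in 1.139.0 is the new `Runtime` field on the job command (surfaced below as `command.runtime`, alongside the new `Z.2X` worker type). A minimal sketch of creating a Ray job with it; the job name, role ARN, script path, and the `glueray`/`Ray2.4` values are illustrative assumptions, not taken from this diff:

```ruby
require "aws-sdk-glue"

glue = Aws::Glue::Client.new(region: "us-east-1")

# Create a Ray job; `runtime` on the command is the field added in 1.139.0.
glue.create_job(
  name: "example-ray-job",                                # placeholder
  role: "arn:aws:iam::123456789012:role/GlueJobRole",     # placeholder role ARN
  glue_version: "4.0",                                    # Ray jobs need GlueVersion 4.0 or greater
  worker_type: "Z.2X",                                    # new enum value in this release
  number_of_workers: 2,
  command: {
    name: "glueray",                                      # assumed Ray command name
    python_version: "3.9",
    runtime: "Ray2.4",                                    # assumed runtime identifier
    script_location: "s3://example-bucket/scripts/job.py" # placeholder script
  }
)
```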
@@ -912,6 +912,8 @@ module Aws::Glue
  # resp.results[0].rule_results[0].description #=> String
  # resp.results[0].rule_results[0].evaluation_message #=> String
  # resp.results[0].rule_results[0].result #=> String, one of "PASS", "FAIL", "ERROR"
+ # resp.results[0].rule_results[0].evaluated_metrics #=> Hash
+ # resp.results[0].rule_results[0].evaluated_metrics["NameString"] #=> Float
  # resp.results_not_found #=> Array
  # resp.results_not_found[0] #=> String
  #
@@ -958,7 +960,7 @@ module Aws::Glue
  # resp.dev_endpoints[0].zeppelin_remote_spark_interpreter_port #=> Integer
  # resp.dev_endpoints[0].public_address #=> String
  # resp.dev_endpoints[0].status #=> String
- # resp.dev_endpoints[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X"
+ # resp.dev_endpoints[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X", "Z.2X"
  # resp.dev_endpoints[0].glue_version #=> String
  # resp.dev_endpoints[0].number_of_workers #=> Integer
  # resp.dev_endpoints[0].number_of_nodes #=> Integer
@@ -1022,6 +1024,7 @@ module Aws::Glue
  # resp.jobs[0].command.name #=> String
  # resp.jobs[0].command.script_location #=> String
  # resp.jobs[0].command.python_version #=> String
+ # resp.jobs[0].command.runtime #=> String
  # resp.jobs[0].default_arguments #=> Hash
  # resp.jobs[0].default_arguments["GenericString"] #=> String
  # resp.jobs[0].non_overridable_arguments #=> Hash
@@ -1032,7 +1035,7 @@ module Aws::Glue
  # resp.jobs[0].allocated_capacity #=> Integer
  # resp.jobs[0].timeout #=> Integer
  # resp.jobs[0].max_capacity #=> Float
- # resp.jobs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X"
+ # resp.jobs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X", "Z.2X"
  # resp.jobs[0].number_of_workers #=> Integer
  # resp.jobs[0].security_configuration #=> String
  # resp.jobs[0].notification_property.notify_delay_after #=> Integer
@@ -1775,6 +1778,19 @@ module Aws::Glue
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].amazon_redshift_target.data.selected_columns[0].description #=> String
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].amazon_redshift_target.inputs #=> Array
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].amazon_redshift_target.inputs[0] #=> String
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].evaluate_data_quality_multi_frame.name #=> String
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].evaluate_data_quality_multi_frame.inputs #=> Array
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].evaluate_data_quality_multi_frame.inputs[0] #=> String
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].evaluate_data_quality_multi_frame.additional_data_sources #=> Hash
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].evaluate_data_quality_multi_frame.additional_data_sources["NodeName"] #=> String
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].evaluate_data_quality_multi_frame.ruleset #=> String
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].evaluate_data_quality_multi_frame.publishing_options.evaluation_context #=> String
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].evaluate_data_quality_multi_frame.publishing_options.results_s3_prefix #=> String
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].evaluate_data_quality_multi_frame.publishing_options.cloud_watch_metrics_enabled #=> Boolean
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].evaluate_data_quality_multi_frame.publishing_options.results_publishing_enabled #=> Boolean
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].evaluate_data_quality_multi_frame.additional_options #=> Hash
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].evaluate_data_quality_multi_frame.additional_options["AdditionalOptionKeys"] #=> String
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].evaluate_data_quality_multi_frame.stop_job_on_failure_options.stop_job_on_failure_timing #=> String, one of "Immediate", "AfterDataLoad"
  # resp.jobs[0].execution_class #=> String, one of "FLEX", "STANDARD"
  # resp.jobs[0].source_control_details.provider #=> String, one of "GITHUB", "AWS_CODE_COMMIT"
  # resp.jobs[0].source_control_details.repository #=> String
@@ -2052,7 +2068,7 @@ module Aws::Glue
  # resp.workflows[0].last_run.graph.nodes[0].job_details.job_runs[0].execution_time #=> Integer
  # resp.workflows[0].last_run.graph.nodes[0].job_details.job_runs[0].timeout #=> Integer
  # resp.workflows[0].last_run.graph.nodes[0].job_details.job_runs[0].max_capacity #=> Float
- # resp.workflows[0].last_run.graph.nodes[0].job_details.job_runs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X"
+ # resp.workflows[0].last_run.graph.nodes[0].job_details.job_runs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X", "Z.2X"
  # resp.workflows[0].last_run.graph.nodes[0].job_details.job_runs[0].number_of_workers #=> Integer
  # resp.workflows[0].last_run.graph.nodes[0].job_details.job_runs[0].security_configuration #=> String
  # resp.workflows[0].last_run.graph.nodes[0].job_details.job_runs[0].log_group_name #=> String
@@ -2120,7 +2136,7 @@ module Aws::Glue
  # resp.workflows[0].graph.nodes[0].job_details.job_runs[0].execution_time #=> Integer
  # resp.workflows[0].graph.nodes[0].job_details.job_runs[0].timeout #=> Integer
  # resp.workflows[0].graph.nodes[0].job_details.job_runs[0].max_capacity #=> Float
- # resp.workflows[0].graph.nodes[0].job_details.job_runs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X"
+ # resp.workflows[0].graph.nodes[0].job_details.job_runs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X", "Z.2X"
  # resp.workflows[0].graph.nodes[0].job_details.job_runs[0].number_of_workers #=> Integer
  # resp.workflows[0].graph.nodes[0].job_details.job_runs[0].security_configuration #=> String
  # resp.workflows[0].graph.nodes[0].job_details.job_runs[0].log_group_name #=> String
@@ -2871,6 +2887,7 @@ module Aws::Glue
  # target_table: {
  # table_name: "NameString", # required
  # database_name: "NameString", # required
+ # catalog_id: "NameString",
  # },
  # client_token: "HashString",
  # })
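The new optional `catalog_id` on `target_table` backs the cross-account ruleset feature noted in the 1.138.0 changelog entry. A minimal sketch, assuming this request shape belongs to `create_data_quality_ruleset`; the table, database, ruleset, and account ID are placeholders:

```ruby
require "aws-sdk-glue"

glue = Aws::Glue::Client.new

# Target a table owned by another account by passing that account's ID as
# the catalog_id -- the optional field added in this release.
glue.create_data_quality_ruleset(
  name: "shared-orders-ruleset",                          # placeholder
  ruleset: 'Rules = [ ColumnCount > 0, IsComplete "id" ]',
  target_table: {
    table_name: "orders",                                 # placeholder
    database_name: "sales",                               # placeholder
    catalog_id: "111122223333"                            # owning account's Data Catalog (placeholder)
  }
)
```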
@@ -3096,7 +3113,7 @@ module Aws::Glue
  # public_key: "GenericString",
  # public_keys: ["GenericString"],
  # number_of_nodes: 1,
- # worker_type: "Standard", # accepts Standard, G.1X, G.2X, G.025X, G.4X, G.8X
+ # worker_type: "Standard", # accepts Standard, G.1X, G.2X, G.025X, G.4X, G.8X, Z.2X
  # glue_version: "GlueVersionString",
  # number_of_workers: 1,
  # extra_python_libs_s3_path: "GenericString",
@@ -3121,7 +3138,7 @@ module Aws::Glue
  # resp.yarn_endpoint_address #=> String
  # resp.zeppelin_remote_spark_interpreter_port #=> Integer
  # resp.number_of_nodes #=> Integer
- # resp.worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X"
+ # resp.worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X", "Z.2X"
  # resp.glue_version #=> String
  # resp.number_of_workers #=> Integer
  # resp.availability_zone #=> String
@@ -3167,7 +3184,8 @@ module Aws::Glue
  # The `JobCommand` that runs this job.
  #
  # @option params [Hash<String,String>] :default_arguments
- # The default arguments for this job.
+ # The default arguments for every run of this job, specified as
+ # name-value pairs.
  #
  # You can specify arguments here that your own job-execution script
  # consumes, as well as arguments that Glue itself consumes.
@@ -3181,17 +3199,23 @@ module Aws::Glue
  # arguments, see the [Calling Glue APIs in Python][1] topic in the
  # developer guide.
  #
- # For information about the key-value pairs that Glue consumes to set up
- # your job, see the [Special Parameters Used by Glue][2] topic in the
+ # For information about the arguments you can provide to this field when
+ # configuring Spark jobs, see the [Special Parameters Used by Glue][2]
+ # topic in the developer guide.
+ #
+ # For information about the arguments you can provide to this field when
+ # configuring Ray jobs, see [Using job parameters in Ray jobs][3] in the
  # developer guide.
  #
  #
  #
  # [1]: https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html
  # [2]: https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html
+ # [3]: https://docs.aws.amazon.com/glue/latest/dg/author-job-ray-job-parameters.html
  #
  # @option params [Hash<String,String>] :non_overridable_arguments
- # Non-overridable arguments for this job, specified as name-value pairs.
+ # Arguments for this job that are not overridden when providing job
+ # arguments in a job run, specified as name-value pairs.
  #
  # @option params [Types::ConnectionsList] :connections
  # The connections used for this job.
@@ -3222,12 +3246,17 @@ module Aws::Glue
  # the number of Glue data processing units (DPUs) that can be allocated
  # when this job runs. A DPU is a relative measure of processing power
  # that consists of 4 vCPUs of compute capacity and 16 GB of memory. For
- # more information, see the [Glue pricing page][1].
+ # more information, see the [ Glue pricing page][1].
+ #
+ # For Glue version 2.0+ jobs, you cannot specify a `Maximum capacity`.
+ # Instead, you should specify a `Worker type` and the `Number of
+ # workers`.
  #
- # Do not set `Max Capacity` if using `WorkerType` and `NumberOfWorkers`.
+ # Do not set `MaxCapacity` if using `WorkerType` and `NumberOfWorkers`.
  #
  # The value that can be allocated for `MaxCapacity` depends on whether
- # you are running a Python shell job or an Apache Spark ETL job:
+ # you are running a Python shell job, an Apache Spark ETL job, or an
+ # Apache Spark streaming ETL job:
  #
  # * When you specify a Python shell job
  # (`JobCommand.Name`="pythonshell"), you can allocate either 0.0625
@@ -3235,14 +3264,10 @@ module Aws::Glue
  #
  # * When you specify an Apache Spark ETL job
  # (`JobCommand.Name`="glueetl") or Apache Spark streaming ETL job
- # (`JobCommand.Name`="gluestreaming"), you can allocate a minimum of
- # 2 DPUs. The default is 10 DPUs. This job type cannot have a
+ # (`JobCommand.Name`="gluestreaming"), you can allocate from 2 to
+ # 100 DPUs. The default is 10 DPUs. This job type cannot have a
  # fractional DPU allocation.
  #
- # For Glue version 2.0 jobs, you cannot instead specify a `Maximum
- # capacity`. Instead, you should specify a `Worker type` and the `Number
- # of workers`.
- #
  #
  #
  # [1]: https://aws.amazon.com/glue/pricing/
@@ -3264,9 +3289,13 @@ module Aws::Glue
  # Specifies configuration properties of a job notification.
  #
  # @option params [String] :glue_version
- # Glue version determines the versions of Apache Spark and Python that
- # Glue supports. The Python version indicates the version supported for
- # jobs of type Spark.
+ # In Spark jobs, `GlueVersion` determines the versions of Apache Spark
+ # and Python that Glue makes available in a job. The Python version
+ # indicates the version supported for jobs of type Spark.
+ #
+ # Ray jobs should set `GlueVersion` to `4.0` or greater. However, the
+ # versions of Ray, Python and additional libraries available in your Ray
+ # job are determined by the `Runtime` parameter of the Job command.
  #
  # For more information about the available Glue versions and
  # corresponding Spark and Python versions, see [Glue version][1] in the
@@ -3285,7 +3314,8 @@ module Aws::Glue
  #
  # @option params [String] :worker_type
  # The type of predefined worker that is allocated when a job runs.
- # Accepts a value of Standard, G.1X, G.2X, or G.025X.
+ # Accepts a value of Standard, G.1X, G.2X, or G.025X for Spark jobs.
+ # Accepts the value Z.2X for Ray jobs.
  #
  # * For the `Standard` worker type, each worker provides 4 vCPU, 16 GB
  # of memory and a 50GB disk, and 2 executors per worker.
@@ -3303,6 +3333,10 @@ module Aws::Glue
  # recommend this worker type for low volume streaming jobs. This
  # worker type is only available for Glue version 3.0 streaming jobs.
  #
+ # * For the `Z.2X` worker type, each worker maps to 2 M-DPU (8vCPU, 64
+ # GB of memory, 128 GB disk), and provides up to 8 Ray workers based
+ # on the autoscaler.
+ #
  # @option params [Hash<String,Types::CodeGenConfigurationNode>] :code_gen_configuration_nodes
  # The representation of a directed acyclic graph on which both the Glue
  # Studio visual component and Glue Studio code generation is based.
@@ -3523,7 +3557,7 @@ module Aws::Glue
  # role: "RoleString", # required
  # glue_version: "GlueVersionString",
  # max_capacity: 1.0,
- # worker_type: "Standard", # accepts Standard, G.1X, G.2X, G.025X, G.4X, G.8X
+ # worker_type: "Standard", # accepts Standard, G.1X, G.2X, G.025X, G.4X, G.8X, Z.2X
  # number_of_workers: 1,
  # timeout: 1,
  # max_retries: 1,
@@ -4097,7 +4131,7 @@ module Aws::Glue
  # },
  # max_capacity: 1.0,
  # number_of_workers: 1,
- # worker_type: "Standard", # accepts Standard, G.1X, G.2X, G.025X, G.4X, G.8X
+ # worker_type: "Standard", # accepts Standard, G.1X, G.2X, G.025X, G.4X, G.8X, Z.2X
  # security_configuration: "NameString",
  # glue_version: "GlueVersionString",
  # tags: {
@@ -6247,6 +6281,8 @@ module Aws::Glue
  # resp.rule_results[0].description #=> String
  # resp.rule_results[0].evaluation_message #=> String
  # resp.rule_results[0].result #=> String, one of "PASS", "FAIL", "ERROR"
+ # resp.rule_results[0].evaluated_metrics #=> Hash
+ # resp.rule_results[0].evaluated_metrics["NameString"] #=> Float
  #
  # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetDataQualityResult AWS API Documentation
  #
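The new `evaluated_metrics` map carries the profiled metric values mentioned in the 1.138.0 changelog entry. A minimal sketch of reading it back from `get_data_quality_result`; the result ID is a placeholder:

```ruby
require "aws-sdk-glue"

glue = Aws::Glue::Client.new

resp = glue.get_data_quality_result(result_id: "dqresult-0123456789abcdef") # placeholder ID

resp.rule_results.each do |rule|
  puts "#{rule.description}: #{rule.result}"
  # evaluated_metrics is the String => Float map added in this release.
  (rule.evaluated_metrics || {}).each do |metric, value|
    puts "  #{metric} = #{value}"
  end
end
```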
@@ -6342,6 +6378,7 @@ module Aws::Glue
  # resp.ruleset #=> String
  # resp.target_table.table_name #=> String
  # resp.target_table.database_name #=> String
+ # resp.target_table.catalog_id #=> String
  # resp.created_on #=> Time
  # resp.last_modified_on #=> Time
  # resp.recommendation_run_id #=> String
@@ -6377,6 +6414,7 @@ module Aws::Glue
  # * {Types::GetDataQualityRulesetEvaluationRunResponse#execution_time #execution_time} => Integer
  # * {Types::GetDataQualityRulesetEvaluationRunResponse#ruleset_names #ruleset_names} => Array<String>
  # * {Types::GetDataQualityRulesetEvaluationRunResponse#result_ids #result_ids} => Array<String>
+ # * {Types::GetDataQualityRulesetEvaluationRunResponse#additional_data_sources #additional_data_sources} => Hash<String,Types::DataSource>
  #
  # @example Request syntax with placeholder values
  #
@@ -6408,6 +6446,13 @@ module Aws::Glue
  # resp.ruleset_names[0] #=> String
  # resp.result_ids #=> Array
  # resp.result_ids[0] #=> String
+ # resp.additional_data_sources #=> Hash
+ # resp.additional_data_sources["NameString"].glue_table.database_name #=> String
+ # resp.additional_data_sources["NameString"].glue_table.table_name #=> String
+ # resp.additional_data_sources["NameString"].glue_table.catalog_id #=> String
+ # resp.additional_data_sources["NameString"].glue_table.connection_name #=> String
+ # resp.additional_data_sources["NameString"].glue_table.additional_options #=> Hash
+ # resp.additional_data_sources["NameString"].glue_table.additional_options["NameString"] #=> String
  #
  # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetDataQualityRulesetEvaluationRun AWS API Documentation
  #
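A minimal sketch of reading the new `additional_data_sources` map back from `get_data_quality_ruleset_evaluation_run`; the run ID is a placeholder:

```ruby
require "aws-sdk-glue"

glue = Aws::Glue::Client.new

resp = glue.get_data_quality_ruleset_evaluation_run(run_id: "dqrun-0123456789abcdef") # placeholder ID

# New in this release: the reference-name => DataSource map is echoed back.
(resp.additional_data_sources || {}).each do |ref_name, source|
  table = source.glue_table
  puts "#{ref_name}: #{table.database_name}.#{table.table_name} (catalog: #{table.catalog_id})"
end
```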
@@ -6611,7 +6656,7 @@ module Aws::Glue
  # resp.dev_endpoint.zeppelin_remote_spark_interpreter_port #=> Integer
  # resp.dev_endpoint.public_address #=> String
  # resp.dev_endpoint.status #=> String
- # resp.dev_endpoint.worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X"
+ # resp.dev_endpoint.worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X", "Z.2X"
  # resp.dev_endpoint.glue_version #=> String
  # resp.dev_endpoint.number_of_workers #=> Integer
  # resp.dev_endpoint.number_of_nodes #=> Integer
@@ -6682,7 +6727,7 @@ module Aws::Glue
  # resp.dev_endpoints[0].zeppelin_remote_spark_interpreter_port #=> Integer
  # resp.dev_endpoints[0].public_address #=> String
  # resp.dev_endpoints[0].status #=> String
- # resp.dev_endpoints[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X"
+ # resp.dev_endpoints[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X", "Z.2X"
  # resp.dev_endpoints[0].glue_version #=> String
  # resp.dev_endpoints[0].number_of_workers #=> Integer
  # resp.dev_endpoints[0].number_of_nodes #=> Integer
@@ -6738,6 +6783,7 @@ module Aws::Glue
  # resp.job.command.name #=> String
  # resp.job.command.script_location #=> String
  # resp.job.command.python_version #=> String
+ # resp.job.command.runtime #=> String
  # resp.job.default_arguments #=> Hash
  # resp.job.default_arguments["GenericString"] #=> String
  # resp.job.non_overridable_arguments #=> Hash
@@ -6748,7 +6794,7 @@ module Aws::Glue
  # resp.job.allocated_capacity #=> Integer
  # resp.job.timeout #=> Integer
  # resp.job.max_capacity #=> Float
- # resp.job.worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X"
+ # resp.job.worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X", "Z.2X"
  # resp.job.number_of_workers #=> Integer
  # resp.job.security_configuration #=> String
  # resp.job.notification_property.notify_delay_after #=> Integer
@@ -7491,6 +7537,19 @@ module Aws::Glue
  # resp.job.code_gen_configuration_nodes["NodeId"].amazon_redshift_target.data.selected_columns[0].description #=> String
  # resp.job.code_gen_configuration_nodes["NodeId"].amazon_redshift_target.inputs #=> Array
  # resp.job.code_gen_configuration_nodes["NodeId"].amazon_redshift_target.inputs[0] #=> String
+ # resp.job.code_gen_configuration_nodes["NodeId"].evaluate_data_quality_multi_frame.name #=> String
+ # resp.job.code_gen_configuration_nodes["NodeId"].evaluate_data_quality_multi_frame.inputs #=> Array
+ # resp.job.code_gen_configuration_nodes["NodeId"].evaluate_data_quality_multi_frame.inputs[0] #=> String
+ # resp.job.code_gen_configuration_nodes["NodeId"].evaluate_data_quality_multi_frame.additional_data_sources #=> Hash
+ # resp.job.code_gen_configuration_nodes["NodeId"].evaluate_data_quality_multi_frame.additional_data_sources["NodeName"] #=> String
+ # resp.job.code_gen_configuration_nodes["NodeId"].evaluate_data_quality_multi_frame.ruleset #=> String
+ # resp.job.code_gen_configuration_nodes["NodeId"].evaluate_data_quality_multi_frame.publishing_options.evaluation_context #=> String
+ # resp.job.code_gen_configuration_nodes["NodeId"].evaluate_data_quality_multi_frame.publishing_options.results_s3_prefix #=> String
+ # resp.job.code_gen_configuration_nodes["NodeId"].evaluate_data_quality_multi_frame.publishing_options.cloud_watch_metrics_enabled #=> Boolean
+ # resp.job.code_gen_configuration_nodes["NodeId"].evaluate_data_quality_multi_frame.publishing_options.results_publishing_enabled #=> Boolean
+ # resp.job.code_gen_configuration_nodes["NodeId"].evaluate_data_quality_multi_frame.additional_options #=> Hash
+ # resp.job.code_gen_configuration_nodes["NodeId"].evaluate_data_quality_multi_frame.additional_options["AdditionalOptionKeys"] #=> String
+ # resp.job.code_gen_configuration_nodes["NodeId"].evaluate_data_quality_multi_frame.stop_job_on_failure_options.stop_job_on_failure_timing #=> String, one of "Immediate", "AfterDataLoad"
  # resp.job.execution_class #=> String, one of "FLEX", "STANDARD"
  # resp.job.source_control_details.provider #=> String, one of "GITHUB", "AWS_CODE_COMMIT"
  # resp.job.source_control_details.repository #=> String
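The `evaluate_data_quality_multi_frame` node added above is part of the visual-job graph. A minimal sketch of inspecting it, assuming the surrounding operation is `get_job` and using a placeholder job name:

```ruby
require "aws-sdk-glue"

glue = Aws::Glue::Client.new

resp = glue.get_job(job_name: "example-visual-job") # placeholder name

(resp.job.code_gen_configuration_nodes || {}).each do |node_id, node|
  dq = node.evaluate_data_quality_multi_frame
  next unless dq

  puts "#{node_id}: #{dq.name}"
  puts "  inputs:  #{dq.inputs.inspect}"
  puts "  ruleset: #{dq.ruleset}"
  # stop_job_on_failure_timing is "Immediate" or "AfterDataLoad".
  puts "  stop on failure: #{dq.stop_job_on_failure_options&.stop_job_on_failure_timing}"
end
```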
@@ -7606,7 +7665,7 @@ module Aws::Glue
  # resp.job_run.execution_time #=> Integer
  # resp.job_run.timeout #=> Integer
  # resp.job_run.max_capacity #=> Float
- # resp.job_run.worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X"
+ # resp.job_run.worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X", "Z.2X"
  # resp.job_run.number_of_workers #=> Integer
  # resp.job_run.security_configuration #=> String
  # resp.job_run.log_group_name #=> String
@@ -7672,7 +7731,7 @@ module Aws::Glue
  # resp.job_runs[0].execution_time #=> Integer
  # resp.job_runs[0].timeout #=> Integer
  # resp.job_runs[0].max_capacity #=> Float
- # resp.job_runs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X"
+ # resp.job_runs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X", "Z.2X"
  # resp.job_runs[0].number_of_workers #=> Integer
  # resp.job_runs[0].security_configuration #=> String
  # resp.job_runs[0].log_group_name #=> String
@@ -7726,6 +7785,7 @@ module Aws::Glue
  # resp.jobs[0].command.name #=> String
  # resp.jobs[0].command.script_location #=> String
  # resp.jobs[0].command.python_version #=> String
+ # resp.jobs[0].command.runtime #=> String
  # resp.jobs[0].default_arguments #=> Hash
  # resp.jobs[0].default_arguments["GenericString"] #=> String
  # resp.jobs[0].non_overridable_arguments #=> Hash
@@ -7736,7 +7796,7 @@ module Aws::Glue
  # resp.jobs[0].allocated_capacity #=> Integer
  # resp.jobs[0].timeout #=> Integer
  # resp.jobs[0].max_capacity #=> Float
- # resp.jobs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X"
+ # resp.jobs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X", "Z.2X"
  # resp.jobs[0].number_of_workers #=> Integer
  # resp.jobs[0].security_configuration #=> String
  # resp.jobs[0].notification_property.notify_delay_after #=> Integer
@@ -8479,6 +8539,19 @@ module Aws::Glue
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].amazon_redshift_target.data.selected_columns[0].description #=> String
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].amazon_redshift_target.inputs #=> Array
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].amazon_redshift_target.inputs[0] #=> String
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].evaluate_data_quality_multi_frame.name #=> String
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].evaluate_data_quality_multi_frame.inputs #=> Array
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].evaluate_data_quality_multi_frame.inputs[0] #=> String
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].evaluate_data_quality_multi_frame.additional_data_sources #=> Hash
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].evaluate_data_quality_multi_frame.additional_data_sources["NodeName"] #=> String
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].evaluate_data_quality_multi_frame.ruleset #=> String
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].evaluate_data_quality_multi_frame.publishing_options.evaluation_context #=> String
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].evaluate_data_quality_multi_frame.publishing_options.results_s3_prefix #=> String
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].evaluate_data_quality_multi_frame.publishing_options.cloud_watch_metrics_enabled #=> Boolean
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].evaluate_data_quality_multi_frame.publishing_options.results_publishing_enabled #=> Boolean
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].evaluate_data_quality_multi_frame.additional_options #=> Hash
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].evaluate_data_quality_multi_frame.additional_options["AdditionalOptionKeys"] #=> String
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].evaluate_data_quality_multi_frame.stop_job_on_failure_options.stop_job_on_failure_timing #=> String, one of "Immediate", "AfterDataLoad"
  # resp.jobs[0].execution_class #=> String, one of "FLEX", "STANDARD"
  # resp.jobs[0].source_control_details.provider #=> String, one of "GITHUB", "AWS_CODE_COMMIT"
  # resp.jobs[0].source_control_details.repository #=> String
@@ -8720,7 +8793,7 @@ module Aws::Glue
  # resp.role #=> String
  # resp.glue_version #=> String
  # resp.max_capacity #=> Float
- # resp.worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X"
+ # resp.worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X", "Z.2X"
  # resp.number_of_workers #=> Integer
  # resp.timeout #=> Integer
  # resp.max_retries #=> Integer
@@ -8830,7 +8903,7 @@ module Aws::Glue
  # resp.transforms[0].role #=> String
  # resp.transforms[0].glue_version #=> String
  # resp.transforms[0].max_capacity #=> Float
- # resp.transforms[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X"
+ # resp.transforms[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X", "Z.2X"
  # resp.transforms[0].number_of_workers #=> Integer
  # resp.transforms[0].timeout #=> Integer
  # resp.transforms[0].max_retries #=> Integer
@@ -11196,7 +11269,7 @@ module Aws::Glue
  # resp.workflow.last_run.graph.nodes[0].job_details.job_runs[0].execution_time #=> Integer
  # resp.workflow.last_run.graph.nodes[0].job_details.job_runs[0].timeout #=> Integer
  # resp.workflow.last_run.graph.nodes[0].job_details.job_runs[0].max_capacity #=> Float
- # resp.workflow.last_run.graph.nodes[0].job_details.job_runs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X"
+ # resp.workflow.last_run.graph.nodes[0].job_details.job_runs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X", "Z.2X"
  # resp.workflow.last_run.graph.nodes[0].job_details.job_runs[0].number_of_workers #=> Integer
  # resp.workflow.last_run.graph.nodes[0].job_details.job_runs[0].security_configuration #=> String
  # resp.workflow.last_run.graph.nodes[0].job_details.job_runs[0].log_group_name #=> String
@@ -11264,7 +11337,7 @@ module Aws::Glue
  # resp.workflow.graph.nodes[0].job_details.job_runs[0].execution_time #=> Integer
  # resp.workflow.graph.nodes[0].job_details.job_runs[0].timeout #=> Integer
  # resp.workflow.graph.nodes[0].job_details.job_runs[0].max_capacity #=> Float
- # resp.workflow.graph.nodes[0].job_details.job_runs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X"
+ # resp.workflow.graph.nodes[0].job_details.job_runs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X", "Z.2X"
  # resp.workflow.graph.nodes[0].job_details.job_runs[0].number_of_workers #=> Integer
  # resp.workflow.graph.nodes[0].job_details.job_runs[0].security_configuration #=> String
  # resp.workflow.graph.nodes[0].job_details.job_runs[0].log_group_name #=> String
@@ -11385,7 +11458,7 @@ module Aws::Glue
  # resp.run.graph.nodes[0].job_details.job_runs[0].execution_time #=> Integer
  # resp.run.graph.nodes[0].job_details.job_runs[0].timeout #=> Integer
  # resp.run.graph.nodes[0].job_details.job_runs[0].max_capacity #=> Float
- # resp.run.graph.nodes[0].job_details.job_runs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X"
+ # resp.run.graph.nodes[0].job_details.job_runs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X", "Z.2X"
  # resp.run.graph.nodes[0].job_details.job_runs[0].number_of_workers #=> Integer
  # resp.run.graph.nodes[0].job_details.job_runs[0].security_configuration #=> String
  # resp.run.graph.nodes[0].job_details.job_runs[0].log_group_name #=> String
@@ -11546,7 +11619,7 @@ module Aws::Glue
  # resp.runs[0].graph.nodes[0].job_details.job_runs[0].execution_time #=> Integer
  # resp.runs[0].graph.nodes[0].job_details.job_runs[0].timeout #=> Integer
  # resp.runs[0].graph.nodes[0].job_details.job_runs[0].max_capacity #=> Float
- # resp.runs[0].graph.nodes[0].job_details.job_runs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X"
+ # resp.runs[0].graph.nodes[0].job_details.job_runs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X", "G.4X", "G.8X", "Z.2X"
  # resp.runs[0].graph.nodes[0].job_details.job_runs[0].number_of_workers #=> Integer
  # resp.runs[0].graph.nodes[0].job_details.job_runs[0].security_configuration #=> String
  # resp.runs[0].graph.nodes[0].job_details.job_runs[0].log_group_name #=> String
@@ -12046,6 +12119,7 @@ module Aws::Glue
  # target_table: {
  # table_name: "NameString", # required
  # database_name: "NameString", # required
+ # catalog_id: "NameString",
  # },
  # },
  # tags: {
@@ -12062,6 +12136,7 @@ module Aws::Glue
  # resp.rulesets[0].last_modified_on #=> Time
  # resp.rulesets[0].target_table.table_name #=> String
  # resp.rulesets[0].target_table.database_name #=> String
+ # resp.rulesets[0].target_table.catalog_id #=> String
  # resp.rulesets[0].recommendation_run_id #=> String
  # resp.rulesets[0].rule_count #=> Integer
  # resp.next_token #=> String
@@ -13510,6 +13585,10 @@ module Aws::Glue
  # @option params [required, Array<String>] :ruleset_names
  # A list of ruleset names.
  #
+ # @option params [Hash<String,Types::DataSource>] :additional_data_sources
+ # A map of reference strings to additional data sources you can specify
+ # for an evaluation run.
+ #
  # @return [Types::StartDataQualityRulesetEvaluationRunResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
  #
  # * {Types::StartDataQualityRulesetEvaluationRunResponse#run_id #run_id} => String
@@ -13537,6 +13616,19 @@ module Aws::Glue
  # results_s3_prefix: "UriString",
  # },
  # ruleset_names: ["NameString"], # required
+ # additional_data_sources: {
+ # "NameString" => {
+ # glue_table: { # required
+ # database_name: "NameString", # required
+ # table_name: "NameString", # required
+ # catalog_id: "NameString",
+ # connection_name: "NameString",
+ # additional_options: {
+ # "NameString" => "DescriptionString",
+ # },
+ # },
+ # },
+ # },
  # })
  #
  # @example Response structure
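A minimal sketch of the new `additional_data_sources` request field in use. The primary `data_source` and `role` parameters are not shown in this hunk and are assumed from the operation's overall signature; all names, ARNs, and IDs are placeholders:

```ruby
require "aws-sdk-glue"

glue = Aws::Glue::Client.new

resp = glue.start_data_quality_ruleset_evaluation_run(
  data_source: {                                              # primary table under evaluation (assumed field)
    glue_table: { database_name: "sales", table_name: "orders" }
  },
  role: "arn:aws:iam::123456789012:role/GlueDataQualityRole", # placeholder role ARN
  ruleset_names: ["orders-ruleset"],                          # placeholder
  additional_data_sources: {                                  # new: reference name => DataSource
    "reference" => {
      glue_table: {
        database_name: "sales",
        table_name: "orders_reference",
        catalog_id: "111122223333"                            # optional, supports cross-account tables
      }
    }
  }
)
puts resp.run_id
```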
@@ -13668,7 +13760,7 @@ module Aws::Glue
  # The ID of a previous `JobRun` to retry.
  #
  # @option params [Hash<String,String>] :arguments
- # The job arguments specifically for this run. For this job run, they
+ # The job arguments associated with this run. For this job run, they
  # replace the default arguments set in the job definition itself.
  #
  # You can specify arguments here that your own job-execution script
@@ -13683,14 +13775,19 @@ module Aws::Glue
  # arguments, see the [Calling Glue APIs in Python][1] topic in the
  # developer guide.
  #
- # For information about the key-value pairs that Glue consumes to set up
- # your job, see the [Special Parameters Used by Glue][2] topic in the
+ # For information about the arguments you can provide to this field when
+ # configuring Spark jobs, see the [Special Parameters Used by Glue][2]
+ # topic in the developer guide.
+ #
+ # For information about the arguments you can provide to this field when
+ # configuring Ray jobs, see [Using job parameters in Ray jobs][3] in the
  # developer guide.
  #
  #
  #
  # [1]: https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html
  # [2]: https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html
+ # [3]: https://docs.aws.amazon.com/glue/latest/dg/author-job-ray-job-parameters.html
  #
  # @option params [Integer] :allocated_capacity
  # This field is deprecated. Use `MaxCapacity` instead.
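As the revised `:arguments` text above notes, arguments passed to a run override the job's default arguments, and Ray jobs read their own parameter set. A minimal sketch; the job name and values are placeholders, and `--pip-install` is only an assumed example of a Ray job parameter from the linked guide:

```ruby
require "aws-sdk-glue"

glue = Aws::Glue::Client.new

# Arguments passed here replace the job's default_arguments for this run only.
resp = glue.start_job_run(
  job_name: "example-ray-job",                        # placeholder
  arguments: {
    "--input_path"  => "s3://example-bucket/input/",  # consumed by your own script
    "--pip-install" => "pandas==2.0.1"                # assumed example of a Ray job parameter
  }
)
puts resp.job_run_id
```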
@@ -13714,24 +13811,31 @@ module Aws::Glue
  # jobs is 2,880 minutes (48 hours).
  #
  # @option params [Float] :max_capacity
- # The number of Glue data processing units (DPUs) that can be allocated
+ # For Glue version 1.0 or earlier jobs, using the standard worker type,
+ # the number of Glue data processing units (DPUs) that can be allocated
  # when this job runs. A DPU is a relative measure of processing power
  # that consists of 4 vCPUs of compute capacity and 16 GB of memory. For
- # more information, see the [Glue pricing page][1].
+ # more information, see the [ Glue pricing page][1].
  #
- # Do not set `Max Capacity` if using `WorkerType` and `NumberOfWorkers`.
+ # For Glue version 2.0+ jobs, you cannot specify a `Maximum capacity`.
+ # Instead, you should specify a `Worker type` and the `Number of
+ # workers`.
+ #
+ # Do not set `MaxCapacity` if using `WorkerType` and `NumberOfWorkers`.
  #
  # The value that can be allocated for `MaxCapacity` depends on whether
- # you are running a Python shell job, or an Apache Spark ETL job:
+ # you are running a Python shell job, an Apache Spark ETL job, or an
+ # Apache Spark streaming ETL job:
  #
  # * When you specify a Python shell job
  # (`JobCommand.Name`="pythonshell"), you can allocate either 0.0625
  # or 1 DPU. The default is 0.0625 DPU.
  #
  # * When you specify an Apache Spark ETL job
- # (`JobCommand.Name`="glueetl"), you can allocate a minimum of 2
- # DPUs. The default is 10 DPUs. This job type cannot have a fractional
- # DPU allocation.
+ # (`JobCommand.Name`="glueetl") or Apache Spark streaming ETL job
+ # (`JobCommand.Name`="gluestreaming"), you can allocate from 2 to
+ # 100 DPUs. The default is 10 DPUs. This job type cannot have a
+ # fractional DPU allocation.
  #
  #
  #
@@ -13746,22 +13850,29 @@ module Aws::Glue
  #
  # @option params [String] :worker_type
  # The type of predefined worker that is allocated when a job runs.
- # Accepts a value of Standard, G.1X, G.2X, or G.025X.
+ # Accepts a value of Standard, G.1X, G.2X, or G.025X for Spark jobs.
+ # Accepts the value Z.2X for Ray jobs.
  #
  # * For the `Standard` worker type, each worker provides 4 vCPU, 16 GB
  # of memory and a 50GB disk, and 2 executors per worker.
  #
- # * For the `G.1X` worker type, each worker provides 4 vCPU, 16 GB of
- # memory and a 64GB disk, and 1 executor per worker.
+ # * For the `G.1X` worker type, each worker maps to 1 DPU (4 vCPU, 16 GB
+ # of memory, 64 GB disk), and provides 1 executor per worker. We
+ # recommend this worker type for memory-intensive jobs.
  #
- # * For the `G.2X` worker type, each worker provides 8 vCPU, 32 GB of
- # memory and a 128GB disk, and 1 executor per worker.
+ # * For the `G.2X` worker type, each worker maps to 2 DPU (8 vCPU, 32 GB
+ # of memory, 128 GB disk), and provides 1 executor per worker. We
+ # recommend this worker type for memory-intensive jobs.
  #
  # * For the `G.025X` worker type, each worker maps to 0.25 DPU (2 vCPU,
  # 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We
  # recommend this worker type for low volume streaming jobs. This
  # worker type is only available for Glue version 3.0 streaming jobs.
  #
+ # * For the `Z.2X` worker type, each worker maps to 2 DPU (8vCPU, 64 GB
+ # of memory, 128 GB disk), and provides up to 8 Ray workers (one per
+ # vCPU) based on the autoscaler.
+ #
  # @option params [Integer] :number_of_workers
  # The number of workers of a defined `workerType` that are allocated
  # when a job runs.
@@ -13797,7 +13908,7 @@ module Aws::Glue
  # notification_property: {
  # notify_delay_after: 1,
  # },
- # worker_type: "Standard", # accepts Standard, G.1X, G.2X, G.025X, G.4X, G.8X
+ # worker_type: "Standard", # accepts Standard, G.1X, G.2X, G.025X, G.4X, G.8X, Z.2X
  # number_of_workers: 1,
  # execution_class: "FLEX", # accepts FLEX, STANDARD
  # })
@@ -15123,7 +15234,7 @@ module Aws::Glue
  # role: "RoleString",
  # glue_version: "GlueVersionString",
  # max_capacity: 1.0,
- # worker_type: "Standard", # accepts Standard, G.1X, G.2X, G.025X, G.4X, G.8X
+ # worker_type: "Standard", # accepts Standard, G.1X, G.2X, G.025X, G.4X, G.8X, Z.2X
  # number_of_workers: 1,
  # timeout: 1,
  # max_retries: 1,
@@ -15752,7 +15863,7 @@ module Aws::Glue
  params: params,
  config: config)
  context[:gem_name] = 'aws-sdk-glue'
- context[:gem_version] = '1.137.0'
+ context[:gem_version] = '1.139.0'
  Seahorse::Client::Request.new(handlers, context)
  end