@aws-sdk/client-glue 3.341.0 → 3.342.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. package/dist-cjs/models/models_0.js +1 -0
  2. package/dist-es/models/models_0.js +1 -0
  3. package/dist-types/commands/BatchGetDevEndpointsCommand.d.ts +1 -1
  4. package/dist-types/commands/BatchGetJobsCommand.d.ts +2 -1
  5. package/dist-types/commands/BatchGetWorkflowsCommand.d.ts +2 -2
  6. package/dist-types/commands/CreateDevEndpointCommand.d.ts +2 -2
  7. package/dist-types/commands/CreateJobCommand.d.ts +2 -1
  8. package/dist-types/commands/CreateMLTransformCommand.d.ts +1 -1
  9. package/dist-types/commands/CreateSessionCommand.d.ts +1 -1
  10. package/dist-types/commands/GetDevEndpointCommand.d.ts +1 -1
  11. package/dist-types/commands/GetDevEndpointsCommand.d.ts +1 -1
  12. package/dist-types/commands/GetJobCommand.d.ts +2 -1
  13. package/dist-types/commands/GetJobRunCommand.d.ts +1 -1
  14. package/dist-types/commands/GetJobRunsCommand.d.ts +1 -1
  15. package/dist-types/commands/GetJobsCommand.d.ts +2 -1
  16. package/dist-types/commands/GetMLTransformCommand.d.ts +1 -1
  17. package/dist-types/commands/GetMLTransformsCommand.d.ts +1 -1
  18. package/dist-types/commands/GetWorkflowCommand.d.ts +2 -2
  19. package/dist-types/commands/GetWorkflowRunCommand.d.ts +1 -1
  20. package/dist-types/commands/GetWorkflowRunsCommand.d.ts +1 -1
  21. package/dist-types/commands/StartJobRunCommand.d.ts +1 -1
  22. package/dist-types/commands/UpdateJobCommand.d.ts +2 -1
  23. package/dist-types/commands/UpdateMLTransformCommand.d.ts +1 -1
  24. package/dist-types/models/models_0.d.ts +53 -17
  25. package/dist-types/models/models_2.d.ts +145 -61
  26. package/dist-types/ts3.4/models/models_0.d.ts +2 -0
  27. package/package.json +28 -28
@@ -206,6 +206,7 @@ exports.WorkerType = {
206
206
  G_4X: "G.4X",
207
207
  G_8X: "G.8X",
208
208
  Standard: "Standard",
209
+ Z_2X: "Z.2X",
209
210
  };
210
211
  exports.StartingPosition = {
211
212
  EARLIEST: "earliest",
@@ -193,6 +193,7 @@ export const WorkerType = {
193
193
  G_4X: "G.4X",
194
194
  G_8X: "G.8X",
195
195
  Standard: "Standard",
196
+ Z_2X: "Z.2X",
196
197
  };
197
198
  export const StartingPosition = {
198
199
  EARLIEST: "earliest",
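
The two hunks above add the new Z.2X value to the WorkerType enum object in both the CommonJS and ES builds. A minimal sketch of what that exposes to TypeScript consumers of 3.342.0 (output comments are illustrative):

import { WorkerType } from "@aws-sdk/client-glue";

// As of 3.342.0 the enum object carries the Ray worker type alongside the Spark ones.
console.log(WorkerType.Z_2X);            // "Z.2X"
console.log(Object.values(WorkerType));  // includes "G.4X", "G.8X", "Standard" and now "Z.2X"
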
@@ -54,7 +54,7 @@ export interface BatchGetDevEndpointsCommandOutput extends BatchGetDevEndpointsR
54
54
  * // ZeppelinRemoteSparkInterpreterPort: Number("int"),
55
55
  * // PublicAddress: "STRING_VALUE",
56
56
  * // Status: "STRING_VALUE",
57
- * // WorkerType: "Standard" || "G.1X" || "G.2X" || "G.025X" || "G.4X" || "G.8X",
57
+ * // WorkerType: "Standard" || "G.1X" || "G.2X" || "G.025X" || "G.4X" || "G.8X" || "Z.2X",
58
58
  * // GlueVersion: "STRING_VALUE",
59
59
  * // NumberOfWorkers: Number("int"),
60
60
  * // NumberOfNodes: Number("int"),
@@ -55,6 +55,7 @@ export interface BatchGetJobsCommandOutput extends BatchGetJobsResponse, __Metad
55
55
  * // Name: "STRING_VALUE",
56
56
  * // ScriptLocation: "STRING_VALUE",
57
57
  * // PythonVersion: "STRING_VALUE",
58
+ * // Runtime: "STRING_VALUE",
58
59
  * // },
59
60
  * // DefaultArguments: { // GenericMap
60
61
  * // "<keys>": "STRING_VALUE",
@@ -71,7 +72,7 @@ export interface BatchGetJobsCommandOutput extends BatchGetJobsResponse, __Metad
71
72
  * // AllocatedCapacity: Number("int"),
72
73
  * // Timeout: Number("int"),
73
74
  * // MaxCapacity: Number("double"),
74
- * // WorkerType: "Standard" || "G.1X" || "G.2X" || "G.025X" || "G.4X" || "G.8X",
75
+ * // WorkerType: "Standard" || "G.1X" || "G.2X" || "G.025X" || "G.4X" || "G.8X" || "Z.2X",
75
76
  * // NumberOfWorkers: Number("int"),
76
77
  * // SecurityConfiguration: "STRING_VALUE",
77
78
  * // NotificationProperty: { // NotificationProperty
@@ -142,7 +142,7 @@ export interface BatchGetWorkflowsCommandOutput extends BatchGetWorkflowsRespons
142
142
  * // ExecutionTime: Number("int"),
143
143
  * // Timeout: Number("int"),
144
144
  * // MaxCapacity: Number("double"),
145
- * // WorkerType: "Standard" || "G.1X" || "G.2X" || "G.025X" || "G.4X" || "G.8X",
145
+ * // WorkerType: "Standard" || "G.1X" || "G.2X" || "G.025X" || "G.4X" || "G.8X" || "Z.2X",
146
146
  * // NumberOfWorkers: Number("int"),
147
147
  * // SecurityConfiguration: "STRING_VALUE",
148
148
  * // LogGroupName: "STRING_VALUE",
@@ -252,7 +252,7 @@ export interface BatchGetWorkflowsCommandOutput extends BatchGetWorkflowsRespons
252
252
  * // ExecutionTime: Number("int"),
253
253
  * // Timeout: Number("int"),
254
254
  * // MaxCapacity: Number("double"),
255
- * // WorkerType: "Standard" || "G.1X" || "G.2X" || "G.025X" || "G.4X" || "G.8X",
255
+ * // WorkerType: "Standard" || "G.1X" || "G.2X" || "G.025X" || "G.4X" || "G.8X" || "Z.2X",
256
256
  * // NumberOfWorkers: Number("int"),
257
257
  * // SecurityConfiguration: "STRING_VALUE",
258
258
  * // LogGroupName: "STRING_VALUE",
@@ -42,7 +42,7 @@ export interface CreateDevEndpointCommandOutput extends CreateDevEndpointRespons
42
42
  * "STRING_VALUE",
43
43
  * ],
44
44
  * NumberOfNodes: Number("int"),
45
- * WorkerType: "Standard" || "G.1X" || "G.2X" || "G.025X" || "G.4X" || "G.8X",
45
+ * WorkerType: "Standard" || "G.1X" || "G.2X" || "G.025X" || "G.4X" || "G.8X" || "Z.2X",
46
46
  * GlueVersion: "STRING_VALUE",
47
47
  * NumberOfWorkers: Number("int"),
48
48
  * ExtraPythonLibsS3Path: "STRING_VALUE",
@@ -68,7 +68,7 @@ export interface CreateDevEndpointCommandOutput extends CreateDevEndpointRespons
68
68
  * // YarnEndpointAddress: "STRING_VALUE",
69
69
  * // ZeppelinRemoteSparkInterpreterPort: Number("int"),
70
70
  * // NumberOfNodes: Number("int"),
71
- * // WorkerType: "Standard" || "G.1X" || "G.2X" || "G.025X" || "G.4X" || "G.8X",
71
+ * // WorkerType: "Standard" || "G.1X" || "G.2X" || "G.025X" || "G.4X" || "G.8X" || "Z.2X",
72
72
  * // GlueVersion: "STRING_VALUE",
73
73
  * // NumberOfWorkers: Number("int"),
74
74
  * // AvailabilityZone: "STRING_VALUE",
@@ -43,6 +43,7 @@ export interface CreateJobCommandOutput extends CreateJobResponse, __MetadataBea
43
43
  * Name: "STRING_VALUE",
44
44
  * ScriptLocation: "STRING_VALUE",
45
45
  * PythonVersion: "STRING_VALUE",
46
+ * Runtime: "STRING_VALUE",
46
47
  * },
47
48
  * DefaultArguments: { // GenericMap
48
49
  * "<keys>": "STRING_VALUE",
@@ -68,7 +69,7 @@ export interface CreateJobCommandOutput extends CreateJobResponse, __MetadataBea
68
69
  * },
69
70
  * GlueVersion: "STRING_VALUE",
70
71
  * NumberOfWorkers: Number("int"),
71
- * WorkerType: "Standard" || "G.1X" || "G.2X" || "G.025X" || "G.4X" || "G.8X",
72
+ * WorkerType: "Standard" || "G.1X" || "G.2X" || "G.025X" || "G.4X" || "G.8X" || "Z.2X",
72
73
  * CodeGenConfigurationNodes: { // CodeGenConfigurationNodes
73
74
  * "<keys>": { // CodeGenConfigurationNode
74
75
  * AthenaConnectorSource: { // AthenaConnectorSource
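
The CreateJobCommand hunks above surface the two user-visible additions: the Runtime field on the job command and the Z.2X worker type. A hedged sketch of creating a Ray job with this client version; the job name, role ARN, script path, "Ray2.4" runtime string, and Python version are illustrative assumptions to be checked against the Ray runtimes page linked later in this diff:

import { GlueClient, CreateJobCommand } from "@aws-sdk/client-glue";

const client = new GlueClient({ region: "us-east-1" });

async function createRayJob(): Promise<void> {
  await client.send(
    new CreateJobCommand({
      Name: "example-ray-job",                                   // hypothetical job name
      Role: "arn:aws:iam::123456789012:role/GlueJobRole",        // hypothetical role ARN
      GlueVersion: "4.0",                                        // Ray jobs require 4.0 or greater per the docs in this diff
      WorkerType: "Z.2X",                                        // new Ray worker type
      NumberOfWorkers: 5,
      Command: {
        Name: "glueray",
        Runtime: "Ray2.4",                                       // assumption: example value, see author-job-ray-runtimes.html
        PythonVersion: "3.9",                                    // assumption: Python version used by the Ray runtime
        ScriptLocation: "s3://example-bucket/scripts/ray_job.py", // hypothetical script path
      },
    })
  );
}
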
@@ -66,7 +66,7 @@ export interface CreateMLTransformCommandOutput extends CreateMLTransformRespons
66
66
  * Role: "STRING_VALUE", // required
67
67
  * GlueVersion: "STRING_VALUE",
68
68
  * MaxCapacity: Number("double"),
69
- * WorkerType: "Standard" || "G.1X" || "G.2X" || "G.025X" || "G.4X" || "G.8X",
69
+ * WorkerType: "Standard" || "G.1X" || "G.2X" || "G.025X" || "G.4X" || "G.8X" || "Z.2X",
70
70
  * NumberOfWorkers: Number("int"),
71
71
  * Timeout: Number("int"),
72
72
  * MaxRetries: Number("int"),
@@ -50,7 +50,7 @@ export interface CreateSessionCommandOutput extends CreateSessionResponse, __Met
50
50
  * },
51
51
  * MaxCapacity: Number("double"),
52
52
  * NumberOfWorkers: Number("int"),
53
- * WorkerType: "Standard" || "G.1X" || "G.2X" || "G.025X" || "G.4X" || "G.8X",
53
+ * WorkerType: "Standard" || "G.1X" || "G.2X" || "G.025X" || "G.4X" || "G.8X" || "Z.2X",
54
54
  * SecurityConfiguration: "STRING_VALUE",
55
55
  * GlueVersion: "STRING_VALUE",
56
56
  * Tags: { // TagsMap
@@ -53,7 +53,7 @@ export interface GetDevEndpointCommandOutput extends GetDevEndpointResponse, __M
53
53
  * // ZeppelinRemoteSparkInterpreterPort: Number("int"),
54
54
  * // PublicAddress: "STRING_VALUE",
55
55
  * // Status: "STRING_VALUE",
56
- * // WorkerType: "Standard" || "G.1X" || "G.2X" || "G.025X" || "G.4X" || "G.8X",
56
+ * // WorkerType: "Standard" || "G.1X" || "G.2X" || "G.025X" || "G.4X" || "G.8X" || "Z.2X",
57
57
  * // GlueVersion: "STRING_VALUE",
58
58
  * // NumberOfWorkers: Number("int"),
59
59
  * // NumberOfNodes: Number("int"),
@@ -55,7 +55,7 @@ export interface GetDevEndpointsCommandOutput extends GetDevEndpointsResponse, _
55
55
  * // ZeppelinRemoteSparkInterpreterPort: Number("int"),
56
56
  * // PublicAddress: "STRING_VALUE",
57
57
  * // Status: "STRING_VALUE",
58
- * // WorkerType: "Standard" || "G.1X" || "G.2X" || "G.025X" || "G.4X" || "G.8X",
58
+ * // WorkerType: "Standard" || "G.1X" || "G.2X" || "G.025X" || "G.4X" || "G.8X" || "Z.2X",
59
59
  * // GlueVersion: "STRING_VALUE",
60
60
  * // NumberOfWorkers: Number("int"),
61
61
  * // NumberOfNodes: Number("int"),
@@ -51,6 +51,7 @@ export interface GetJobCommandOutput extends GetJobResponse, __MetadataBearer {
51
51
  * // Name: "STRING_VALUE",
52
52
  * // ScriptLocation: "STRING_VALUE",
53
53
  * // PythonVersion: "STRING_VALUE",
54
+ * // Runtime: "STRING_VALUE",
54
55
  * // },
55
56
  * // DefaultArguments: { // GenericMap
56
57
  * // "<keys>": "STRING_VALUE",
@@ -67,7 +68,7 @@ export interface GetJobCommandOutput extends GetJobResponse, __MetadataBearer {
67
68
  * // AllocatedCapacity: Number("int"),
68
69
  * // Timeout: Number("int"),
69
70
  * // MaxCapacity: Number("double"),
70
- * // WorkerType: "Standard" || "G.1X" || "G.2X" || "G.025X" || "G.4X" || "G.8X",
71
+ * // WorkerType: "Standard" || "G.1X" || "G.2X" || "G.025X" || "G.4X" || "G.8X" || "Z.2X",
71
72
  * // NumberOfWorkers: Number("int"),
72
73
  * // SecurityConfiguration: "STRING_VALUE",
73
74
  * // NotificationProperty: { // NotificationProperty
@@ -62,7 +62,7 @@ export interface GetJobRunCommandOutput extends GetJobRunResponse, __MetadataBea
62
62
  * // ExecutionTime: Number("int"),
63
63
  * // Timeout: Number("int"),
64
64
  * // MaxCapacity: Number("double"),
65
- * // WorkerType: "Standard" || "G.1X" || "G.2X" || "G.025X" || "G.4X" || "G.8X",
65
+ * // WorkerType: "Standard" || "G.1X" || "G.2X" || "G.025X" || "G.4X" || "G.8X" || "Z.2X",
66
66
  * // NumberOfWorkers: Number("int"),
67
67
  * // SecurityConfiguration: "STRING_VALUE",
68
68
  * // LogGroupName: "STRING_VALUE",
@@ -63,7 +63,7 @@ export interface GetJobRunsCommandOutput extends GetJobRunsResponse, __MetadataB
63
63
  * // ExecutionTime: Number("int"),
64
64
  * // Timeout: Number("int"),
65
65
  * // MaxCapacity: Number("double"),
66
- * // WorkerType: "Standard" || "G.1X" || "G.2X" || "G.025X" || "G.4X" || "G.8X",
66
+ * // WorkerType: "Standard" || "G.1X" || "G.2X" || "G.025X" || "G.4X" || "G.8X" || "Z.2X",
67
67
  * // NumberOfWorkers: Number("int"),
68
68
  * // SecurityConfiguration: "STRING_VALUE",
69
69
  * // LogGroupName: "STRING_VALUE",
@@ -53,6 +53,7 @@ export interface GetJobsCommandOutput extends GetJobsResponse, __MetadataBearer
53
53
  * // Name: "STRING_VALUE",
54
54
  * // ScriptLocation: "STRING_VALUE",
55
55
  * // PythonVersion: "STRING_VALUE",
56
+ * // Runtime: "STRING_VALUE",
56
57
  * // },
57
58
  * // DefaultArguments: { // GenericMap
58
59
  * // "<keys>": "STRING_VALUE",
@@ -69,7 +70,7 @@ export interface GetJobsCommandOutput extends GetJobsResponse, __MetadataBearer
69
70
  * // AllocatedCapacity: Number("int"),
70
71
  * // Timeout: Number("int"),
71
72
  * // MaxCapacity: Number("double"),
72
- * // WorkerType: "Standard" || "G.1X" || "G.2X" || "G.025X" || "G.4X" || "G.8X",
73
+ * // WorkerType: "Standard" || "G.1X" || "G.2X" || "G.025X" || "G.4X" || "G.8X" || "Z.2X",
73
74
  * // NumberOfWorkers: Number("int"),
74
75
  * // SecurityConfiguration: "STRING_VALUE",
75
76
  * // NotificationProperty: { // NotificationProperty
@@ -97,7 +97,7 @@ export interface GetMLTransformCommandOutput extends GetMLTransformResponse, __M
97
97
  * // Role: "STRING_VALUE",
98
98
  * // GlueVersion: "STRING_VALUE",
99
99
  * // MaxCapacity: Number("double"),
100
- * // WorkerType: "Standard" || "G.1X" || "G.2X" || "G.025X" || "G.4X" || "G.8X",
100
+ * // WorkerType: "Standard" || "G.1X" || "G.2X" || "G.025X" || "G.4X" || "G.8X" || "Z.2X",
101
101
  * // NumberOfWorkers: Number("int"),
102
102
  * // Timeout: Number("int"),
103
103
  * // MaxRetries: Number("int"),
@@ -120,7 +120,7 @@ export interface GetMLTransformsCommandOutput extends GetMLTransformsResponse, _
120
120
  * // Role: "STRING_VALUE",
121
121
  * // GlueVersion: "STRING_VALUE",
122
122
  * // MaxCapacity: Number("double"),
123
- * // WorkerType: "Standard" || "G.1X" || "G.2X" || "G.025X" || "G.4X" || "G.8X",
123
+ * // WorkerType: "Standard" || "G.1X" || "G.2X" || "G.025X" || "G.4X" || "G.8X" || "Z.2X",
124
124
  * // NumberOfWorkers: Number("int"),
125
125
  * // Timeout: Number("int"),
126
126
  * // MaxRetries: Number("int"),
@@ -139,7 +139,7 @@ export interface GetWorkflowCommandOutput extends GetWorkflowResponse, __Metadat
139
139
  * // ExecutionTime: Number("int"),
140
140
  * // Timeout: Number("int"),
141
141
  * // MaxCapacity: Number("double"),
142
- * // WorkerType: "Standard" || "G.1X" || "G.2X" || "G.025X" || "G.4X" || "G.8X",
142
+ * // WorkerType: "Standard" || "G.1X" || "G.2X" || "G.025X" || "G.4X" || "G.8X" || "Z.2X",
143
143
  * // NumberOfWorkers: Number("int"),
144
144
  * // SecurityConfiguration: "STRING_VALUE",
145
145
  * // LogGroupName: "STRING_VALUE",
@@ -249,7 +249,7 @@ export interface GetWorkflowCommandOutput extends GetWorkflowResponse, __Metadat
249
249
  * // ExecutionTime: Number("int"),
250
250
  * // Timeout: Number("int"),
251
251
  * // MaxCapacity: Number("double"),
252
- * // WorkerType: "Standard" || "G.1X" || "G.2X" || "G.025X" || "G.4X" || "G.8X",
252
+ * // WorkerType: "Standard" || "G.1X" || "G.2X" || "G.025X" || "G.4X" || "G.8X" || "Z.2X",
253
253
  * // NumberOfWorkers: Number("int"),
254
254
  * // SecurityConfiguration: "STRING_VALUE",
255
255
  * // LogGroupName: "STRING_VALUE",
@@ -132,7 +132,7 @@ export interface GetWorkflowRunCommandOutput extends GetWorkflowRunResponse, __M
132
132
  * // ExecutionTime: Number("int"),
133
133
  * // Timeout: Number("int"),
134
134
  * // MaxCapacity: Number("double"),
135
- * // WorkerType: "Standard" || "G.1X" || "G.2X" || "G.025X" || "G.4X" || "G.8X",
135
+ * // WorkerType: "Standard" || "G.1X" || "G.2X" || "G.025X" || "G.4X" || "G.8X" || "Z.2X",
136
136
  * // NumberOfWorkers: Number("int"),
137
137
  * // SecurityConfiguration: "STRING_VALUE",
138
138
  * // LogGroupName: "STRING_VALUE",
@@ -134,7 +134,7 @@ export interface GetWorkflowRunsCommandOutput extends GetWorkflowRunsResponse, _
134
134
  * // ExecutionTime: Number("int"),
135
135
  * // Timeout: Number("int"),
136
136
  * // MaxCapacity: Number("double"),
137
- * // WorkerType: "Standard" || "G.1X" || "G.2X" || "G.025X" || "G.4X" || "G.8X",
137
+ * // WorkerType: "Standard" || "G.1X" || "G.2X" || "G.025X" || "G.4X" || "G.8X" || "Z.2X",
138
138
  * // NumberOfWorkers: Number("int"),
139
139
  * // SecurityConfiguration: "STRING_VALUE",
140
140
  * // LogGroupName: "STRING_VALUE",
@@ -43,7 +43,7 @@ export interface StartJobRunCommandOutput extends StartJobRunResponse, __Metadat
43
43
  * NotificationProperty: { // NotificationProperty
44
44
  * NotifyDelayAfter: Number("int"),
45
45
  * },
46
- * WorkerType: "Standard" || "G.1X" || "G.2X" || "G.025X" || "G.4X" || "G.8X",
46
+ * WorkerType: "Standard" || "G.1X" || "G.2X" || "G.025X" || "G.4X" || "G.8X" || "Z.2X",
47
47
  * NumberOfWorkers: Number("int"),
48
48
  * ExecutionClass: "FLEX" || "STANDARD",
49
49
  * };
@@ -43,6 +43,7 @@ export interface UpdateJobCommandOutput extends UpdateJobResponse, __MetadataBea
43
43
  * Name: "STRING_VALUE",
44
44
  * ScriptLocation: "STRING_VALUE",
45
45
  * PythonVersion: "STRING_VALUE",
46
+ * Runtime: "STRING_VALUE",
46
47
  * },
47
48
  * DefaultArguments: { // GenericMap
48
49
  * "<keys>": "STRING_VALUE",
@@ -59,7 +60,7 @@ export interface UpdateJobCommandOutput extends UpdateJobResponse, __MetadataBea
59
60
  * AllocatedCapacity: Number("int"),
60
61
  * Timeout: Number("int"),
61
62
  * MaxCapacity: Number("double"),
62
- * WorkerType: "Standard" || "G.1X" || "G.2X" || "G.025X" || "G.4X" || "G.8X",
63
+ * WorkerType: "Standard" || "G.1X" || "G.2X" || "G.025X" || "G.4X" || "G.8X" || "Z.2X",
63
64
  * NumberOfWorkers: Number("int"),
64
65
  * SecurityConfiguration: "STRING_VALUE",
65
66
  * NotificationProperty: { // NotificationProperty
@@ -49,7 +49,7 @@ export interface UpdateMLTransformCommandOutput extends UpdateMLTransformRespons
49
49
  * Role: "STRING_VALUE",
50
50
  * GlueVersion: "STRING_VALUE",
51
51
  * MaxCapacity: Number("double"),
52
- * WorkerType: "Standard" || "G.1X" || "G.2X" || "G.025X" || "G.4X" || "G.8X",
52
+ * WorkerType: "Standard" || "G.1X" || "G.2X" || "G.025X" || "G.4X" || "G.8X" || "Z.2X",
53
53
  * NumberOfWorkers: Number("int"),
54
54
  * Timeout: Number("int"),
55
55
  * MaxRetries: Number("int"),
@@ -1812,6 +1812,7 @@ export declare const WorkerType: {
1812
1812
  readonly G_4X: "G.4X";
1813
1813
  readonly G_8X: "G.8X";
1814
1814
  readonly Standard: "Standard";
1815
+ readonly Z_2X: "Z.2X";
1815
1816
  };
1816
1817
  /**
1817
1818
  * @public
@@ -4549,7 +4550,8 @@ export interface JobCommand {
4549
4550
  /**
4550
4551
  * <p>The name of the job command. For an Apache Spark ETL job, this must be
4551
4552
  * <code>glueetl</code>. For a Python shell job, it must be <code>pythonshell</code>.
4552
- * For an Apache Spark streaming ETL job, this must be <code>gluestreaming</code>.</p>
4553
+ * For an Apache Spark streaming ETL job, this must be <code>gluestreaming</code>. For a Ray job,
4554
+ * this must be <code>glueray</code>.</p>
4553
4555
  */
4554
4556
  Name?: string;
4555
4557
  /**
@@ -4561,6 +4563,13 @@ export interface JobCommand {
4561
4563
  * <p>The Python version being used to run a Python shell job. Allowed values are 2 or 3.</p>
4562
4564
  */
4563
4565
  PythonVersion?: string;
4566
+ /**
4567
+ * <p>In Ray jobs, Runtime is used to specify the versions of Ray, Python and additional
4568
+ * libraries available in your environment. This field is not used in other job types. For
4569
+ * supported runtime environment values, see <a href="https://docs.aws.amazon.com/glue/latest/dg/author-job-ray-runtimes.html">Working with Ray jobs</a>
4570
+ * in the Glue Developer Guide.</p>
4571
+ */
4572
+ Runtime?: string;
4564
4573
  }
4565
4574
  /**
4566
4575
  * @public
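
The JobCommand interface itself gains the optional Runtime member documented above. A small typed sketch contrasting a Ray command with an unchanged Spark ETL command; the script paths are placeholders and the Ray runtime and Python version values are assumptions to be checked against the linked guide:

import type { JobCommand } from "@aws-sdk/client-glue";

// Ray job: Runtime selects the Ray, Python, and library versions.
export const rayCommand: JobCommand = {
  Name: "glueray",
  Runtime: "Ray2.4",                                    // assumption: illustrative runtime value
  PythonVersion: "3.9",                                 // assumption
  ScriptLocation: "s3://example-bucket/scripts/ray_job.py",
};

// Spark ETL job: Runtime is not used and is simply left unset.
export const sparkCommand: JobCommand = {
  Name: "glueetl",
  PythonVersion: "3",
  ScriptLocation: "s3://example-bucket/scripts/etl_job.py",
};
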
@@ -5160,11 +5169,19 @@ export interface JobRun {
5160
5169
  */
5161
5170
  JobRunState?: JobRunState | string;
5162
5171
  /**
5163
- * <p>The job arguments associated with this run. For this job run, they replace the default arguments set in the job definition itself.</p>
5172
+ * <p>The job arguments associated with this run. For this job run, they replace the default
5173
+ * arguments set in the job definition itself.</p>
5164
5174
  * <p>You can specify arguments here that your own job-execution script
5165
5175
  * consumes, as well as arguments that Glue itself consumes.</p>
5166
- * <p>For information about how to specify and consume your own job arguments, see the <a href="https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html">Calling Glue APIs in Python</a> topic in the developer guide.</p>
5167
- * <p>For information about the key-value pairs that Glue consumes to set up your job, see the <a href="https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html">Special Parameters Used by Glue</a> topic in the developer guide.</p>
5176
+ * <p>Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets
5177
+ * from a Glue Connection, Secrets Manager or other secret management
5178
+ * mechanism if you intend to keep them within the Job. </p>
5179
+ * <p>For information about how to specify and consume your own Job arguments, see the <a href="https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html">Calling Glue APIs in Python</a> topic in the developer guide.</p>
5180
+ * <p>For information about the arguments you can provide to this field when configuring Spark jobs,
5181
+ * see the <a href="https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html">Special Parameters Used by Glue</a> topic in the developer guide.</p>
5182
+ * <p>For information about the arguments you can provide to this field when configuring Ray
5183
+ * jobs, see <a href="https://docs.aws.amazon.com/glue/latest/dg/author-job-ray-job-parameters.html">Using
5184
+ * job parameters in Ray jobs</a> in the developer guide.</p>
5168
5185
  */
5169
5186
  Arguments?: Record<string, string>;
5170
5187
  /**
@@ -5197,39 +5214,51 @@ export interface JobRun {
5197
5214
  */
5198
5215
  Timeout?: number;
5199
5216
  /**
5200
- * <p>The number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure
5201
- * of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory.
5202
- * For more information, see the <a href="https://aws.amazon.com/glue/pricing/">Glue
5203
- * pricing page</a>.</p>
5204
- * <p>Do not set <code>Max Capacity</code> if using <code>WorkerType</code> and <code>NumberOfWorkers</code>.</p>
5217
+ * <p>For Glue version 1.0 or earlier jobs, using the standard worker type, the number of
5218
+ * Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is
5219
+ * a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB
5220
+ * of memory. For more information, see the <a href="https://aws.amazon.com/glue/pricing/">
5221
+ * Glue pricing page</a>.</p>
5222
+ * <p>For Glue version 2.0+ jobs, you cannot specify a <code>Maximum capacity</code>.
5223
+ * Instead, you should specify a <code>Worker type</code> and the <code>Number of workers</code>.</p>
5224
+ * <p>Do not set <code>MaxCapacity</code> if using <code>WorkerType</code> and <code>NumberOfWorkers</code>.</p>
5205
5225
  * <p>The value that can be allocated for <code>MaxCapacity</code> depends on whether you are
5206
- * running a Python shell job or an Apache Spark ETL job:</p>
5226
+ * running a Python shell job, an Apache Spark ETL job, or an Apache Spark streaming ETL
5227
+ * job:</p>
5207
5228
  * <ul>
5208
5229
  * <li>
5209
5230
  * <p>When you specify a Python shell job (<code>JobCommand.Name</code>="pythonshell"), you can
5210
- * allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.</p>
5231
+ * allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.</p>
5211
5232
  * </li>
5212
5233
  * <li>
5213
- * <p>When you specify an Apache Spark ETL job (<code>JobCommand.Name</code>="glueetl"), you can allocate a minimum of 2 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.</p>
5234
+ * <p>When you specify an Apache Spark ETL job (<code>JobCommand.Name</code>="glueetl") or Apache
5235
+ * Spark streaming ETL job (<code>JobCommand.Name</code>="gluestreaming"), you can allocate from 2 to 100 DPUs.
5236
+ * The default is 10 DPUs. This job type cannot have a fractional DPU allocation.</p>
5214
5237
  * </li>
5215
5238
  * </ul>
5216
5239
  */
5217
5240
  MaxCapacity?: number;
5218
5241
  /**
5219
- * <p>The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X.</p>
5242
+ * <p>The type of predefined worker that is allocated when a job runs. Accepts a value of
5243
+ * Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.</p>
5220
5244
  * <ul>
5221
5245
  * <li>
5222
5246
  * <p>For the <code>Standard</code> worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.</p>
5223
5247
  * </li>
5224
5248
  * <li>
5225
- * <p>For the <code>G.1X</code> worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.</p>
5249
+ * <p>For the <code>G.1X</code> worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.</p>
5226
5250
  * </li>
5227
5251
  * <li>
5228
- * <p>For the <code>G.2X</code> worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.</p>
5252
+ * <p>For the <code>G.2X</code> worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.</p>
5229
5253
  * </li>
5230
5254
  * <li>
5231
5255
  * <p>For the <code>G.025X</code> worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.</p>
5232
5256
  * </li>
5257
+ * <li>
5258
+ * <p>For the <code>Z.2X</code> worker type, each worker maps to 2 M-DPU (8vCPU, 64 GB of m
5259
+ * emory, 128 GB disk), and provides up to 8 Ray workers (one per vCPU) based on the
5260
+ * autoscaler.</p>
5261
+ * </li>
5233
5262
  * </ul>
5234
5263
  */
5235
5264
  WorkerType?: WorkerType | string;
@@ -5256,8 +5285,15 @@ export interface JobRun {
5256
5285
  */
5257
5286
  NotificationProperty?: NotificationProperty;
5258
5287
  /**
5259
- * <p>Glue version determines the versions of Apache Spark and Python that Glue supports. The Python version indicates the version supported for jobs of type Spark. </p>
5260
- * <p>For more information about the available Glue versions and corresponding Spark and Python versions, see <a href="https://docs.aws.amazon.com/glue/latest/dg/add-job.html">Glue version</a> in the developer guide.</p>
5288
+ * <p>In Spark jobs, <code>GlueVersion</code> determines the versions of Apache Spark and Python
5289
+ * that Glue available in a job. The Python version indicates the version
5290
+ * supported for jobs of type Spark. </p>
5291
+ * <p>Ray jobs should set <code>GlueVersion</code> to <code>4.0</code> or greater. However,
5292
+ * the versions of Ray, Python and additional libraries available in your Ray job are determined
5293
+ * by the <code>Runtime</code> parameter of the Job command.</p>
5294
+ * <p>For more information about the available Glue versions and corresponding
5295
+ * Spark and Python versions, see <a href="https://docs.aws.amazon.com/glue/latest/dg/add-job.html">Glue version</a> in the developer
5296
+ * guide.</p>
5261
5297
  * <p>Jobs that are created without specifying a Glue version default to Glue 0.9.</p>
5262
5298
  */
5263
5299
  GlueVersion?: string;
@@ -2029,15 +2029,19 @@ export interface StartJobRunRequest {
2029
2029
  */
2030
2030
  JobRunId?: string;
2031
2031
  /**
2032
- * <p>The job arguments specifically for this run. For this job run, they replace the default arguments set in the job definition itself.</p>
2032
+ * <p>The job arguments associated with this run. For this job run, they replace the default
2033
+ * arguments set in the job definition itself.</p>
2033
2034
  * <p>You can specify arguments here that your own job-execution script
2034
2035
  * consumes, as well as arguments that Glue itself consumes.</p>
2035
- * <p>Job arguments may be logged. Do not pass plaintext secrets as arguments.
2036
- * Retrieve secrets from a Glue Connection, Secrets Manager or
2037
- * other secret management mechanism if you intend to keep them within the Job.
2038
- * </p>
2036
+ * <p>Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets
2037
+ * from a Glue Connection, Secrets Manager or other secret management
2038
+ * mechanism if you intend to keep them within the Job. </p>
2039
2039
  * <p>For information about how to specify and consume your own Job arguments, see the <a href="https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html">Calling Glue APIs in Python</a> topic in the developer guide.</p>
2040
- * <p>For information about the key-value pairs that Glue consumes to set up your job, see the <a href="https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html">Special Parameters Used by Glue</a> topic in the developer guide.</p>
2040
+ * <p>For information about the arguments you can provide to this field when configuring Spark jobs,
2041
+ * see the <a href="https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html">Special Parameters Used by Glue</a> topic in the developer guide.</p>
2042
+ * <p>For information about the arguments you can provide to this field when configuring Ray
2043
+ * jobs, see <a href="https://docs.aws.amazon.com/glue/latest/dg/author-job-ray-job-parameters.html">Using
2044
+ * job parameters in Ray jobs</a> in the developer guide.</p>
2041
2045
  */
2042
2046
  Arguments?: Record<string, string>;
2043
2047
  /**
@@ -2058,20 +2062,26 @@ export interface StartJobRunRequest {
2058
2062
  */
2059
2063
  Timeout?: number;
2060
2064
  /**
2061
- * <p>The number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure
2062
- * of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory.
2063
- * For more information, see the <a href="https://aws.amazon.com/glue/pricing/">Glue
2064
- * pricing page</a>.</p>
2065
- * <p>Do not set <code>Max Capacity</code> if using <code>WorkerType</code> and <code>NumberOfWorkers</code>.</p>
2065
+ * <p>For Glue version 1.0 or earlier jobs, using the standard worker type, the number of
2066
+ * Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is
2067
+ * a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB
2068
+ * of memory. For more information, see the <a href="https://aws.amazon.com/glue/pricing/">
2069
+ * Glue pricing page</a>.</p>
2070
+ * <p>For Glue version 2.0+ jobs, you cannot specify a <code>Maximum capacity</code>.
2071
+ * Instead, you should specify a <code>Worker type</code> and the <code>Number of workers</code>.</p>
2072
+ * <p>Do not set <code>MaxCapacity</code> if using <code>WorkerType</code> and <code>NumberOfWorkers</code>.</p>
2066
2073
  * <p>The value that can be allocated for <code>MaxCapacity</code> depends on whether you are
2067
- * running a Python shell job, or an Apache Spark ETL job:</p>
2074
+ * running a Python shell job, an Apache Spark ETL job, or an Apache Spark streaming ETL
2075
+ * job:</p>
2068
2076
  * <ul>
2069
2077
  * <li>
2070
2078
  * <p>When you specify a Python shell job (<code>JobCommand.Name</code>="pythonshell"), you can
2071
- * allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.</p>
2079
+ * allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.</p>
2072
2080
  * </li>
2073
2081
  * <li>
2074
- * <p>When you specify an Apache Spark ETL job (<code>JobCommand.Name</code>="glueetl"), you can allocate a minimum of 2 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.</p>
2082
+ * <p>When you specify an Apache Spark ETL job (<code>JobCommand.Name</code>="glueetl") or Apache
2083
+ * Spark streaming ETL job (<code>JobCommand.Name</code>="gluestreaming"), you can allocate from 2 to 100 DPUs.
2084
+ * The default is 10 DPUs. This job type cannot have a fractional DPU allocation.</p>
2075
2085
  * </li>
2076
2086
  * </ul>
2077
2087
  */
@@ -2086,20 +2096,26 @@ export interface StartJobRunRequest {
2086
2096
  */
2087
2097
  NotificationProperty?: NotificationProperty;
2088
2098
  /**
2089
- * <p>The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X.</p>
2099
+ * <p>The type of predefined worker that is allocated when a job runs. Accepts a value of
2100
+ * Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.</p>
2090
2101
  * <ul>
2091
2102
  * <li>
2092
2103
  * <p>For the <code>Standard</code> worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.</p>
2093
2104
  * </li>
2094
2105
  * <li>
2095
- * <p>For the <code>G.1X</code> worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.</p>
2106
+ * <p>For the <code>G.1X</code> worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.</p>
2096
2107
  * </li>
2097
2108
  * <li>
2098
- * <p>For the <code>G.2X</code> worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.</p>
2109
+ * <p>For the <code>G.2X</code> worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.</p>
2099
2110
  * </li>
2100
2111
  * <li>
2101
2112
  * <p>For the <code>G.025X</code> worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.</p>
2102
2113
  * </li>
2114
+ * <li>
2115
+ * <p>For the <code>Z.2X</code> worker type, each worker maps to 2 DPU (8vCPU, 64 GB of m
2116
+ * emory, 128 GB disk), and provides up to 8 Ray workers (one per vCPU) based on the
2117
+ * autoscaler.</p>
2118
+ * </li>
2103
2119
  * </ul>
2104
2120
  */
2105
2121
  WorkerType?: WorkerType | string;
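
The StartJobRunRequest hunks above rewrite the Arguments, MaxCapacity, and WorkerType documentation for Ray. A hedged sketch of starting a run against the hypothetical Ray job shown earlier; the argument key and value are illustrative, and, as the docs above warn, arguments may be logged, so secrets should be passed by reference rather than inlined:

import { GlueClient, StartJobRunCommand } from "@aws-sdk/client-glue";

const client = new GlueClient({ region: "us-east-1" });

async function startRun(): Promise<string | undefined> {
  const out = await client.send(
    new StartJobRunCommand({
      JobName: "example-ray-job",            // hypothetical job
      WorkerType: "Z.2X",
      NumberOfWorkers: 10,
      Arguments: {
        // Hypothetical script argument; use a Secrets Manager reference rather than a plaintext secret.
        "--input_path": "s3://example-bucket/input/",
      },
    })
  );
  return out.JobRunId;
}
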
@@ -3726,19 +3742,23 @@ export interface CreateJobRequest {
3726
3742
  */
3727
3743
  Command: JobCommand | undefined;
3728
3744
  /**
3729
- * <p>The default arguments for this job.</p>
3745
+ * <p>The default arguments for every run of this job, specified as name-value pairs.</p>
3730
3746
  * <p>You can specify arguments here that your own job-execution script
3731
3747
  * consumes, as well as arguments that Glue itself consumes.</p>
3732
- * <p>Job arguments may be logged. Do not pass plaintext secrets as arguments.
3733
- * Retrieve secrets from a Glue Connection, Secrets Manager or
3734
- * other secret management mechanism if you intend to keep them within the Job.
3735
- * </p>
3748
+ * <p>Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets
3749
+ * from a Glue Connection, Secrets Manager or other secret management
3750
+ * mechanism if you intend to keep them within the Job. </p>
3736
3751
  * <p>For information about how to specify and consume your own Job arguments, see the <a href="https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html">Calling Glue APIs in Python</a> topic in the developer guide.</p>
3737
- * <p>For information about the key-value pairs that Glue consumes to set up your job, see the <a href="https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html">Special Parameters Used by Glue</a> topic in the developer guide.</p>
3752
+ * <p>For information about the arguments you can provide to this field when configuring Spark jobs,
3753
+ * see the <a href="https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html">Special Parameters Used by Glue</a> topic in the developer guide.</p>
3754
+ * <p>For information about the arguments you can provide to this field when configuring Ray
3755
+ * jobs, see <a href="https://docs.aws.amazon.com/glue/latest/dg/author-job-ray-job-parameters.html">Using
3756
+ * job parameters in Ray jobs</a> in the developer guide.</p>
3738
3757
  */
3739
3758
  DefaultArguments?: Record<string, string>;
3740
3759
  /**
3741
- * <p>Non-overridable arguments for this job, specified as name-value pairs.</p>
3760
+ * <p>Arguments for this job that are not overridden when providing job arguments
3761
+ * in a job run, specified as name-value pairs.</p>
3742
3762
  */
3743
3763
  NonOverridableArguments?: Record<string, string>;
3744
3764
  /**
@@ -3767,13 +3787,17 @@ export interface CreateJobRequest {
3767
3787
  */
3768
3788
  Timeout?: number;
3769
3789
  /**
3770
- * <p>For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure
3771
- * of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory.
3772
- * For more information, see the <a href="https://aws.amazon.com/glue/pricing/">Glue
3773
- * pricing page</a>.</p>
3774
- * <p>Do not set <code>Max Capacity</code> if using <code>WorkerType</code> and <code>NumberOfWorkers</code>.</p>
3790
+ * <p>For Glue version 1.0 or earlier jobs, using the standard worker type, the number of
3791
+ * Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is
3792
+ * a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB
3793
+ * of memory. For more information, see the <a href="https://aws.amazon.com/glue/pricing/">
3794
+ * Glue pricing page</a>.</p>
3795
+ * <p>For Glue version 2.0+ jobs, you cannot specify a <code>Maximum capacity</code>.
3796
+ * Instead, you should specify a <code>Worker type</code> and the <code>Number of workers</code>.</p>
3797
+ * <p>Do not set <code>MaxCapacity</code> if using <code>WorkerType</code> and <code>NumberOfWorkers</code>.</p>
3775
3798
  * <p>The value that can be allocated for <code>MaxCapacity</code> depends on whether you are
3776
- * running a Python shell job or an Apache Spark ETL job:</p>
3799
+ * running a Python shell job, an Apache Spark ETL job, or an Apache Spark streaming ETL
3800
+ * job:</p>
3777
3801
  * <ul>
3778
3802
  * <li>
3779
3803
  * <p>When you specify a Python shell job (<code>JobCommand.Name</code>="pythonshell"), you can
@@ -3781,11 +3805,10 @@ export interface CreateJobRequest {
3781
3805
  * </li>
3782
3806
  * <li>
3783
3807
  * <p>When you specify an Apache Spark ETL job (<code>JobCommand.Name</code>="glueetl") or Apache
3784
- * Spark streaming ETL job (<code>JobCommand.Name</code>="gluestreaming"), you can allocate a minimum of 2 DPUs.
3785
- * The default is 10 DPUs. This job type cannot have a fractional DPU allocation.</p>
3808
+ * Spark streaming ETL job (<code>JobCommand.Name</code>="gluestreaming"), you can allocate from 2 to 100 DPUs.
3809
+ * The default is 10 DPUs. This job type cannot have a fractional DPU allocation.</p>
3786
3810
  * </li>
3787
3811
  * </ul>
3788
- * <p>For Glue version 2.0 jobs, you cannot instead specify a <code>Maximum capacity</code>. Instead, you should specify a <code>Worker type</code> and the <code>Number of workers</code>.</p>
3789
3812
  */
3790
3813
  MaxCapacity?: number;
3791
3814
  /**
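
The CreateJobRequest hunk above tightens the MaxCapacity wording: fractional DPUs remain a Python shell concept, Spark ETL and streaming jobs take 2 to 100 DPUs, and MaxCapacity cannot be combined with WorkerType and NumberOfWorkers. A minimal Python shell sketch under those rules, with placeholder names and paths:

import { GlueClient, CreateJobCommand } from "@aws-sdk/client-glue";

const client = new GlueClient({ region: "us-east-1" });

async function createShellJob(): Promise<void> {
  await client.send(
    new CreateJobCommand({
      Name: "example-shell-job",                                  // hypothetical
      Role: "arn:aws:iam::123456789012:role/GlueJobRole",         // hypothetical
      Command: {
        Name: "pythonshell",
        PythonVersion: "3",
        ScriptLocation: "s3://example-bucket/scripts/report.py",  // hypothetical
      },
      MaxCapacity: 0.0625, // fractional DPU is only valid for pythonshell; omit WorkerType/NumberOfWorkers here
    })
  );
}
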
@@ -3802,8 +3825,15 @@ export interface CreateJobRequest {
3802
3825
  */
3803
3826
  NotificationProperty?: NotificationProperty;
3804
3827
  /**
3805
- * <p>Glue version determines the versions of Apache Spark and Python that Glue supports. The Python version indicates the version supported for jobs of type Spark. </p>
3806
- * <p>For more information about the available Glue versions and corresponding Spark and Python versions, see <a href="https://docs.aws.amazon.com/glue/latest/dg/add-job.html">Glue version</a> in the developer guide.</p>
3828
+ * <p>In Spark jobs, <code>GlueVersion</code> determines the versions of Apache Spark and Python
3829
+ * that Glue available in a job. The Python version indicates the version
3830
+ * supported for jobs of type Spark. </p>
3831
+ * <p>Ray jobs should set <code>GlueVersion</code> to <code>4.0</code> or greater. However,
3832
+ * the versions of Ray, Python and additional libraries available in your Ray job are determined
3833
+ * by the <code>Runtime</code> parameter of the Job command.</p>
3834
+ * <p>For more information about the available Glue versions and corresponding
3835
+ * Spark and Python versions, see <a href="https://docs.aws.amazon.com/glue/latest/dg/add-job.html">Glue version</a> in the developer
3836
+ * guide.</p>
3807
3837
  * <p>Jobs that are created without specifying a Glue version default to Glue 0.9.</p>
3808
3838
  */
3809
3839
  GlueVersion?: string;
@@ -3812,7 +3842,8 @@ export interface CreateJobRequest {
3812
3842
  */
3813
3843
  NumberOfWorkers?: number;
3814
3844
  /**
3815
- * <p>The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X.</p>
3845
+ * <p>The type of predefined worker that is allocated when a job runs. Accepts a value of
3846
+ * Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.</p>
3816
3847
  * <ul>
3817
3848
  * <li>
3818
3849
  * <p>For the <code>Standard</code> worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.</p>
@@ -3826,6 +3857,11 @@ export interface CreateJobRequest {
3826
3857
  * <li>
3827
3858
  * <p>For the <code>G.025X</code> worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.</p>
3828
3859
  * </li>
3860
+ * <li>
3861
+ * <p>For the <code>Z.2X</code> worker type, each worker maps to 2 M-DPU (8vCPU, 64 GB of m
3862
+ * emory, 128 GB disk), and provides up to 8 Ray workers based on the
3863
+ * autoscaler.</p>
3864
+ * </li>
3829
3865
  * </ul>
3830
3866
  */
3831
3867
  WorkerType?: WorkerType | string;
@@ -3883,15 +3919,23 @@ export interface Job {
3883
3919
  */
3884
3920
  Command?: JobCommand;
3885
3921
  /**
3886
- * <p>The default arguments for this job, specified as name-value pairs.</p>
3922
+ * <p>The default arguments for every run of this job, specified as name-value pairs.</p>
3887
3923
  * <p>You can specify arguments here that your own job-execution script
3888
3924
  * consumes, as well as arguments that Glue itself consumes.</p>
3925
+ * <p>Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets
3926
+ * from a Glue Connection, Secrets Manager or other secret management
3927
+ * mechanism if you intend to keep them within the Job. </p>
3889
3928
  * <p>For information about how to specify and consume your own Job arguments, see the <a href="https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html">Calling Glue APIs in Python</a> topic in the developer guide.</p>
3890
- * <p>For information about the key-value pairs that Glue consumes to set up your job, see the <a href="https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html">Special Parameters Used by Glue</a> topic in the developer guide.</p>
3929
+ * <p>For information about the arguments you can provide to this field when configuring Spark jobs,
3930
+ * see the <a href="https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html">Special Parameters Used by Glue</a> topic in the developer guide.</p>
3931
+ * <p>For information about the arguments you can provide to this field when configuring Ray
3932
+ * jobs, see <a href="https://docs.aws.amazon.com/glue/latest/dg/author-job-ray-job-parameters.html">Using
3933
+ * job parameters in Ray jobs</a> in the developer guide.</p>
3891
3934
  */
3892
3935
  DefaultArguments?: Record<string, string>;
3893
3936
  /**
3894
- * <p>Non-overridable arguments for this job, specified as name-value pairs.</p>
3937
+ * <p>Arguments for this job that are not overridden when providing job arguments
3938
+ * in a job run, specified as name-value pairs.</p>
3895
3939
  */
3896
3940
  NonOverridableArguments?: Record<string, string>;
3897
3941
  /**
@@ -3921,10 +3965,11 @@ export interface Job {
3921
3965
  */
3922
3966
  Timeout?: number;
3923
3967
  /**
3924
- * <p>For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure
3925
- * of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory.
3926
- * For more information, see the <a href="https://aws.amazon.com/glue/pricing/">Glue
3927
- * pricing page</a>.</p>
3968
+ * <p>For Glue version 1.0 or earlier jobs, using the standard worker type, the number of
3969
+ * Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is
3970
+ * a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB
3971
+ * of memory. For more information, see the <a href="https://aws.amazon.com/glue/pricing/">
3972
+ * Glue pricing page</a>.</p>
3928
3973
  * <p>For Glue version 2.0 or later jobs, you cannot specify a <code>Maximum capacity</code>.
3929
3974
  * Instead, you should specify a <code>Worker type</code> and the <code>Number of workers</code>.</p>
3930
3975
  * <p>Do not set <code>MaxCapacity</code> if using <code>WorkerType</code> and <code>NumberOfWorkers</code>.</p>
@@ -3945,7 +3990,8 @@ export interface Job {
3945
3990
  */
3946
3991
  MaxCapacity?: number;
3947
3992
  /**
3948
- * <p>The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X.</p>
3993
+ * <p>The type of predefined worker that is allocated when a job runs. Accepts a value of
3994
+ * Standard, G.1X, G.2X, G.4X, G.8X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.</p>
3949
3995
  * <ul>
3950
3996
  * <li>
3951
3997
  * <p>For the <code>Standard</code> worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.</p>
@@ -3957,14 +4003,18 @@ export interface Job {
3957
4003
  * <p>For the <code>G.2X</code> worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.</p>
3958
4004
  * </li>
3959
4005
  * <li>
3960
- * <p>For the <code>G.4X</code> worker type, each worker maps to 4 DPU (16 vCPU, 64 GB of memory, 256 GB disk), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later jobs.</p>
4006
+ * <p>For the <code>G.4X</code> worker type, each worker maps to 4 DPU (16 vCPU, 64 GB of memory, 256 GB disk), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).</p>
3961
4007
  * </li>
3962
4008
  * <li>
3963
- * <p>For the <code>G.8X</code> worker type, each worker maps to 8 DPU (32 vCPU, 128 GB of memory, 512 GB disk), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later jobs.</p>
4009
+ * <p>For the <code>G.8X</code> worker type, each worker maps to 8 DPU (32 vCPU, 128 GB of memory, 512 GB disk), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the <code>G.4X</code> worker type.</p>
3964
4010
  * </li>
3965
4011
  * <li>
3966
4012
  * <p>For the <code>G.025X</code> worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.</p>
3967
4013
  * </li>
4014
+ * <li>
4015
+ * <p>For the <code>Z.2X</code> worker type, each worker maps to 2 M-DPU (8vCPU, 64 GB of m
4016
+ * emory, 128 GB disk), and provides a default of 8 Ray workers (1 per vCPU).</p>
4017
+ * </li>
3968
4018
  * </ul>
3969
4019
  */
3970
4020
  WorkerType?: WorkerType | string;
@@ -3982,8 +4032,15 @@ export interface Job {
3982
4032
  */
3983
4033
  NotificationProperty?: NotificationProperty;
3984
4034
  /**
3985
- * <p>Glue version determines the versions of Apache Spark and Python that Glue supports. The Python version indicates the version supported for jobs of type Spark. </p>
3986
- * <p>For more information about the available Glue versions and corresponding Spark and Python versions, see <a href="https://docs.aws.amazon.com/glue/latest/dg/add-job.html">Glue version</a> in the developer guide.</p>
4035
+ * <p>In Spark jobs, <code>GlueVersion</code> determines the versions of Apache Spark and Python
4036
+ * that Glue available in a job. The Python version indicates the version
4037
+ * supported for jobs of type Spark. </p>
4038
+ * <p>Ray jobs should set <code>GlueVersion</code> to <code>4.0</code> or greater. However,
4039
+ * the versions of Ray, Python and additional libraries available in your Ray job are determined
4040
+ * by the <code>Runtime</code> parameter of the Job command.</p>
4041
+ * <p>For more information about the available Glue versions and corresponding
4042
+ * Spark and Python versions, see <a href="https://docs.aws.amazon.com/glue/latest/dg/add-job.html">Glue version</a> in the developer
4043
+ * guide.</p>
3987
4044
  * <p>Jobs that are created without specifying a Glue version default to Glue 0.9.</p>
3988
4045
  */
3989
4046
  GlueVersion?: string;
@@ -4031,15 +4088,23 @@ export interface JobUpdate {
4031
4088
  */
4032
4089
  Command?: JobCommand;
4033
4090
  /**
4034
- * <p>The default arguments for this job.</p>
4091
+ * <p>The default arguments for every run of this job, specified as name-value pairs.</p>
4035
4092
  * <p>You can specify arguments here that your own job-execution script
4036
4093
  * consumes, as well as arguments that Glue itself consumes.</p>
4094
+ * <p>Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets
4095
+ * from a Glue Connection, Secrets Manager or other secret management
4096
+ * mechanism if you intend to keep them within the Job. </p>
4037
4097
  * <p>For information about how to specify and consume your own Job arguments, see the <a href="https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html">Calling Glue APIs in Python</a> topic in the developer guide.</p>
4038
- * <p>For information about the key-value pairs that Glue consumes to set up your job, see the <a href="https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html">Special Parameters Used by Glue</a> topic in the developer guide.</p>
4098
+ * <p>For information about the arguments you can provide to this field when configuring Spark jobs,
4099
+ * see the <a href="https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html">Special Parameters Used by Glue</a> topic in the developer guide.</p>
4100
+ * <p>For information about the arguments you can provide to this field when configuring Ray
4101
+ * jobs, see <a href="https://docs.aws.amazon.com/glue/latest/dg/author-job-ray-job-parameters.html">Using
4102
+ * job parameters in Ray jobs</a> in the developer guide.</p>
4039
4103
  */
4040
4104
  DefaultArguments?: Record<string, string>;
4041
4105
  /**
4042
- * <p>Non-overridable arguments for this job, specified as name-value pairs.</p>
4106
+ * <p>Arguments for this job that are not overridden when providing job arguments
4107
+ * in a job run, specified as name-value pairs.</p>
4043
4108
  */
4044
4109
  NonOverridableArguments?: Record<string, string>;
4045
4110
  /**
@@ -4068,11 +4133,17 @@ export interface JobUpdate {
4068
4133
  */
4069
4134
  Timeout?: number;
4070
4135
  /**
4071
- * <p>For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure
4072
- * of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the <a href="https://aws.amazon.com/glue/pricing/">Glue pricing page</a>.</p>
4073
- * <p>Do not set <code>Max Capacity</code> if using <code>WorkerType</code> and <code>NumberOfWorkers</code>.</p>
4136
+ * <p>For Glue version 1.0 or earlier jobs, using the standard worker type, the number of
4137
+ * Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is
4138
+ * a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB
4139
+ * of memory. For more information, see the <a href="https://aws.amazon.com/glue/pricing/">
4140
+ * Glue pricing page</a>.</p>
4141
+ * <p>For Glue version 2.0+ jobs, you cannot specify a <code>Maximum capacity</code>.
4142
+ * Instead, you should specify a <code>Worker type</code> and the <code>Number of workers</code>.</p>
4143
+ * <p>Do not set <code>MaxCapacity</code> if using <code>WorkerType</code> and <code>NumberOfWorkers</code>.</p>
4074
4144
  * <p>The value that can be allocated for <code>MaxCapacity</code> depends on whether you are
4075
- * running a Python shell job or an Apache Spark ETL job:</p>
4145
+ * running a Python shell job, an Apache Spark ETL job, or an Apache Spark streaming ETL
4146
+ * job:</p>
4076
4147
  * <ul>
4077
4148
  * <li>
4078
4149
  * <p>When you specify a Python shell job (<code>JobCommand.Name</code>="pythonshell"), you can
@@ -4080,15 +4151,15 @@ export interface JobUpdate {
4080
4151
  * </li>
4081
4152
  * <li>
4082
4153
  * <p>When you specify an Apache Spark ETL job (<code>JobCommand.Name</code>="glueetl") or Apache
4083
- * Spark streaming ETL job (<code>JobCommand.Name</code>="gluestreaming"), you can allocate a minimum of 2 DPUs.
4084
- * The default is 10 DPUs. This job type cannot have a fractional DPU allocation.</p>
4154
+ * Spark streaming ETL job (<code>JobCommand.Name</code>="gluestreaming"), you can allocate from 2 to 100 DPUs.
4155
+ * The default is 10 DPUs. This job type cannot have a fractional DPU allocation.</p>
4085
4156
  * </li>
4086
4157
  * </ul>
4087
- * <p>For Glue version 2.0 jobs, you cannot instead specify a <code>Maximum capacity</code>. Instead, you should specify a <code>Worker type</code> and the <code>Number of workers</code>.</p>
4088
4158
  */
4089
4159
  MaxCapacity?: number;
4090
4160
  /**
4091
- * <p>The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X.</p>
4161
+ * <p>The type of predefined worker that is allocated when a job runs. Accepts a value of
4162
+ * Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.</p>
4092
4163
  * <ul>
4093
4164
  * <li>
4094
4165
  * <p>For the <code>Standard</code> worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.</p>
@@ -4102,6 +4173,11 @@ export interface JobUpdate {
4102
4173
  * <li>
4103
4174
  * <p>For the <code>G.025X</code> worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.</p>
4104
4175
  * </li>
4176
+ * <li>
4177
+ * <p>For the <code>Z.2X</code> worker type, each worker maps to 2 M-DPU (8vCPU, 64 GB of m
4178
+ * emory, 128 GB disk), and provides up to 8 Ray workers based on the
4179
+ * autoscaler.</p>
4180
+ * </li>
4105
4181
  * </ul>
4106
4182
  */
4107
4183
  WorkerType?: WorkerType | string;
@@ -4119,8 +4195,16 @@ export interface JobUpdate {
4119
4195
  */
4120
4196
  NotificationProperty?: NotificationProperty;
4121
4197
  /**
4122
- * <p>Glue version determines the versions of Apache Spark and Python that Glue supports. The Python version indicates the version supported for jobs of type Spark. </p>
4123
- * <p>For more information about the available Glue versions and corresponding Spark and Python versions, see <a href="https://docs.aws.amazon.com/glue/latest/dg/add-job.html">Glue version</a> in the developer guide.</p>
4198
+ * <p>In Spark jobs, <code>GlueVersion</code> determines the versions of Apache Spark and Python
4199
+ * that Glue available in a job. The Python version indicates the version
4200
+ * supported for jobs of type Spark. </p>
4201
+ * <p>Ray jobs should set <code>GlueVersion</code> to <code>4.0</code> or greater. However,
4202
+ * the versions of Ray, Python and additional libraries available in your Ray job are determined
4203
+ * by the <code>Runtime</code> parameter of the Job command.</p>
4204
+ * <p>For more information about the available Glue versions and corresponding
4205
+ * Spark and Python versions, see <a href="https://docs.aws.amazon.com/glue/latest/dg/add-job.html">Glue version</a> in the developer
4206
+ * guide.</p>
4207
+ * <p>Jobs that are created without specifying a Glue version default to Glue 0.9.</p>
4124
4208
  */
4125
4209
  GlueVersion?: string;
4126
4210
  /**
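
The JobUpdate hunks mirror the same Ray-related documentation for UpdateJob. A hedged sketch of moving an existing job onto the new worker type; the job name, role, and runtime value are placeholders, and since UpdateJob overwrites the previous job definition, a real call would carry over the remaining fields as well:

import { GlueClient, UpdateJobCommand } from "@aws-sdk/client-glue";

const client = new GlueClient({ region: "us-east-1" });

async function moveJobToRay(): Promise<void> {
  await client.send(
    new UpdateJobCommand({
      JobName: "example-ray-job",                               // hypothetical existing job
      JobUpdate: {
        Role: "arn:aws:iam::123456789012:role/GlueJobRole",     // hypothetical
        GlueVersion: "4.0",
        WorkerType: "Z.2X",
        NumberOfWorkers: 5,
        Command: {
          Name: "glueray",
          Runtime: "Ray2.4",                                    // assumption: illustrative runtime value
          PythonVersion: "3.9",                                 // assumption
          ScriptLocation: "s3://example-bucket/scripts/ray_job.py",
        },
      },
    })
  );
}
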
@@ -558,6 +558,7 @@ export declare const WorkerType: {
558
558
  readonly G_4X: "G.4X";
559
559
  readonly G_8X: "G.8X";
560
560
  readonly Standard: "Standard";
561
+ readonly Z_2X: "Z.2X";
561
562
  };
562
563
  export type WorkerType = (typeof WorkerType)[keyof typeof WorkerType];
563
564
  export interface DevEndpoint {
@@ -1383,6 +1384,7 @@ export interface JobCommand {
1383
1384
  Name?: string;
1384
1385
  ScriptLocation?: string;
1385
1386
  PythonVersion?: string;
1387
+ Runtime?: string;
1386
1388
  }
1387
1389
  export interface ConnectionsList {
1388
1390
  Connections?: string[];
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "name": "@aws-sdk/client-glue",
3
3
  "description": "AWS SDK for JavaScript Glue Client for Node.js, Browser and React Native",
4
- "version": "3.341.0",
4
+ "version": "3.342.0",
5
5
  "scripts": {
6
6
  "build": "concurrently 'yarn:build:cjs' 'yarn:build:es' 'yarn:build:types'",
7
7
  "build:cjs": "tsc -p tsconfig.cjs.json",
@@ -21,36 +21,36 @@
21
21
  "dependencies": {
22
22
  "@aws-crypto/sha256-browser": "3.0.0",
23
23
  "@aws-crypto/sha256-js": "3.0.0",
24
- "@aws-sdk/client-sts": "3.341.0",
25
- "@aws-sdk/config-resolver": "3.341.0",
26
- "@aws-sdk/credential-provider-node": "3.341.0",
27
- "@aws-sdk/fetch-http-handler": "3.341.0",
28
- "@aws-sdk/hash-node": "3.341.0",
29
- "@aws-sdk/invalid-dependency": "3.341.0",
30
- "@aws-sdk/middleware-content-length": "3.341.0",
31
- "@aws-sdk/middleware-endpoint": "3.341.0",
32
- "@aws-sdk/middleware-host-header": "3.341.0",
33
- "@aws-sdk/middleware-logger": "3.341.0",
34
- "@aws-sdk/middleware-recursion-detection": "3.341.0",
35
- "@aws-sdk/middleware-retry": "3.341.0",
36
- "@aws-sdk/middleware-serde": "3.341.0",
37
- "@aws-sdk/middleware-signing": "3.341.0",
38
- "@aws-sdk/middleware-stack": "3.341.0",
39
- "@aws-sdk/middleware-user-agent": "3.341.0",
40
- "@aws-sdk/node-config-provider": "3.341.0",
41
- "@aws-sdk/node-http-handler": "3.341.0",
42
- "@aws-sdk/smithy-client": "3.341.0",
43
- "@aws-sdk/types": "3.341.0",
44
- "@aws-sdk/url-parser": "3.341.0",
24
+ "@aws-sdk/client-sts": "3.342.0",
25
+ "@aws-sdk/config-resolver": "3.342.0",
26
+ "@aws-sdk/credential-provider-node": "3.342.0",
27
+ "@aws-sdk/fetch-http-handler": "3.342.0",
28
+ "@aws-sdk/hash-node": "3.342.0",
29
+ "@aws-sdk/invalid-dependency": "3.342.0",
30
+ "@aws-sdk/middleware-content-length": "3.342.0",
31
+ "@aws-sdk/middleware-endpoint": "3.342.0",
32
+ "@aws-sdk/middleware-host-header": "3.342.0",
33
+ "@aws-sdk/middleware-logger": "3.342.0",
34
+ "@aws-sdk/middleware-recursion-detection": "3.342.0",
35
+ "@aws-sdk/middleware-retry": "3.342.0",
36
+ "@aws-sdk/middleware-serde": "3.342.0",
37
+ "@aws-sdk/middleware-signing": "3.342.0",
38
+ "@aws-sdk/middleware-stack": "3.342.0",
39
+ "@aws-sdk/middleware-user-agent": "3.342.0",
40
+ "@aws-sdk/node-config-provider": "3.342.0",
41
+ "@aws-sdk/node-http-handler": "3.342.0",
42
+ "@aws-sdk/smithy-client": "3.342.0",
43
+ "@aws-sdk/types": "3.342.0",
44
+ "@aws-sdk/url-parser": "3.342.0",
45
45
  "@aws-sdk/util-base64": "3.310.0",
46
46
  "@aws-sdk/util-body-length-browser": "3.310.0",
47
47
  "@aws-sdk/util-body-length-node": "3.310.0",
48
- "@aws-sdk/util-defaults-mode-browser": "3.341.0",
49
- "@aws-sdk/util-defaults-mode-node": "3.341.0",
50
- "@aws-sdk/util-endpoints": "3.341.0",
51
- "@aws-sdk/util-retry": "3.341.0",
52
- "@aws-sdk/util-user-agent-browser": "3.341.0",
53
- "@aws-sdk/util-user-agent-node": "3.341.0",
48
+ "@aws-sdk/util-defaults-mode-browser": "3.342.0",
49
+ "@aws-sdk/util-defaults-mode-node": "3.342.0",
50
+ "@aws-sdk/util-endpoints": "3.342.0",
51
+ "@aws-sdk/util-retry": "3.342.0",
52
+ "@aws-sdk/util-user-agent-browser": "3.342.0",
53
+ "@aws-sdk/util-user-agent-node": "3.342.0",
54
54
  "@aws-sdk/util-utf8": "3.310.0",
55
55
  "@smithy/protocol-http": "^1.0.1",
56
56
  "@smithy/types": "^1.0.0",