aws-sdk 2.1386.0 → 2.1388.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. package/CHANGELOG.md +21 -1
  2. package/README.md +1 -1
  3. package/apis/chime-sdk-voice-2022-08-03.min.json +2 -1
  4. package/apis/config-2014-11-12.min.json +129 -112
  5. package/apis/frauddetector-2019-11-15.min.json +29 -12
  6. package/apis/glue-2017-03-31.min.json +510 -509
  7. package/apis/healthlake-2017-07-01.min.json +43 -17
  8. package/apis/iotfleetwise-2021-06-17.min.json +86 -43
  9. package/apis/location-2020-11-19.min.json +23 -16
  10. package/apis/m2-2021-04-28.min.json +90 -40
  11. package/apis/personalize-2018-05-22.min.json +32 -13
  12. package/apis/rds-2014-10-31.min.json +4 -2
  13. package/apis/securityhub-2018-10-26.examples.json +140 -3
  14. package/apis/securityhub-2018-10-26.min.json +465 -158
  15. package/apis/securitylake-2018-05-10.min.json +457 -447
  16. package/apis/securitylake-2018-05-10.paginators.json +7 -7
  17. package/apis/wafv2-2019-07-29.min.json +168 -159
  18. package/apis/workspaces-web-2020-07-08.min.json +360 -71
  19. package/apis/workspaces-web-2020-07-08.paginators.json +5 -0
  20. package/clients/chimesdkvoice.d.ts +5 -0
  21. package/clients/configservice.d.ts +34 -13
  22. package/clients/frauddetector.d.ts +18 -4
  23. package/clients/glue.d.ts +29 -24
  24. package/clients/groundstation.d.ts +2 -2
  25. package/clients/healthlake.d.ts +40 -10
  26. package/clients/iotfleetwise.d.ts +98 -13
  27. package/clients/location.d.ts +24 -8
  28. package/clients/m2.d.ts +81 -2
  29. package/clients/memorydb.d.ts +2 -2
  30. package/clients/personalize.d.ts +34 -17
  31. package/clients/polly.d.ts +2 -2
  32. package/clients/rds.d.ts +10 -2
  33. package/clients/securityhub.d.ts +539 -1
  34. package/clients/securitylake.d.ts +560 -541
  35. package/clients/servicecatalog.d.ts +8 -8
  36. package/clients/wafv2.d.ts +16 -6
  37. package/clients/workspacesweb.d.ts +254 -0
  38. package/dist/aws-sdk-core-react-native.js +1 -1
  39. package/dist/aws-sdk-react-native.js +17 -17
  40. package/dist/aws-sdk.js +191 -146
  41. package/dist/aws-sdk.min.js +63 -63
  42. package/lib/core.js +1 -1
  43. package/package.json +1 -1
package/apis/workspaces-web-2020-07-08.paginators.json CHANGED
@@ -10,6 +10,11 @@
  "output_token": "nextToken",
  "limit_key": "maxResults"
  },
+ "ListIpAccessSettings": {
+ "input_token": "nextToken",
+ "output_token": "nextToken",
+ "limit_key": "maxResults"
+ },
  "ListNetworkSettings": {
  "input_token": "nextToken",
  "output_token": "nextToken",
package/clients/chimesdkvoice.d.ts CHANGED
@@ -872,6 +872,7 @@ declare namespace ChimeSDKVoice {
  */
  IsCaller?: Boolean;
  }
+ export type CallLegType = "Caller"|"Callee"|string;
  export type CallingName = string;
  export type CallingNameStatus = "Unassigned"|"UpdateInProgress"|"UpdateSucceeded"|"UpdateFailed"|string;
  export type CallingRegion = string;
@@ -2567,6 +2568,10 @@ declare namespace ChimeSDKVoice {
  * The unique identifier for the client request. Use a different token for different speaker search tasks.
  */
  ClientRequestToken?: ClientRequestId;
+ /**
+ * Specifies which call leg to stream for speaker search.
+ */
+ CallLeg?: CallLegType;
  }
  export interface StartSpeakerSearchTaskResponse {
  /**
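
The new `CallLeg` field plugs straight into `startSpeakerSearchTask`. A hedged sketch: `VoiceConnectorId` and `TransactionId` are the task's pre-existing required identifiers (placeholders here, not shown in this hunk), and `CallLeg` takes one of the `CallLegType` values added above:

```ts
import AWS from 'aws-sdk';

const voice = new AWS.ChimeSDKVoice({ region: 'us-east-1' }); // placeholder region

async function startSearch(): Promise<void> {
  const task = await voice
    .startSpeakerSearchTask({
      VoiceConnectorId: 'vc-placeholder',    // placeholder identifiers
      TransactionId: 'txn-placeholder',
      ClientRequestToken: 'unique-per-task', // idempotency token
      CallLeg: 'Callee',                     // new field: "Caller" | "Callee"
    })
    .promise();
  console.log(task);
}
```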
package/clients/configservice.d.ts CHANGED
@@ -228,19 +228,19 @@ declare class ConfigService extends Service {
  */
  describeConfigurationAggregators(callback?: (err: AWSError, data: ConfigService.Types.DescribeConfigurationAggregatorsResponse) => void): Request<ConfigService.Types.DescribeConfigurationAggregatorsResponse, AWSError>;
  /**
- * Returns the current status of the specified configuration recorder as well as the status of the last recording event for the recorder. If a configuration recorder is not specified, this action returns the status of all configuration recorders associated with the account. Currently, you can specify only one configuration recorder per region in your account. For a detailed status of recording events over time, add your Config events to Amazon CloudWatch metrics and use CloudWatch metrics.
+ * Returns the current status of the specified configuration recorder as well as the status of the last recording event for the recorder. If a configuration recorder is not specified, this action returns the status of all configuration recorders associated with the account. >You can specify only one configuration recorder for each Amazon Web Services Region for each account. For a detailed status of recording events over time, add your Config events to Amazon CloudWatch metrics and use CloudWatch metrics.
  */
  describeConfigurationRecorderStatus(params: ConfigService.Types.DescribeConfigurationRecorderStatusRequest, callback?: (err: AWSError, data: ConfigService.Types.DescribeConfigurationRecorderStatusResponse) => void): Request<ConfigService.Types.DescribeConfigurationRecorderStatusResponse, AWSError>;
  /**
- * Returns the current status of the specified configuration recorder as well as the status of the last recording event for the recorder. If a configuration recorder is not specified, this action returns the status of all configuration recorders associated with the account. Currently, you can specify only one configuration recorder per region in your account. For a detailed status of recording events over time, add your Config events to Amazon CloudWatch metrics and use CloudWatch metrics.
+ * Returns the current status of the specified configuration recorder as well as the status of the last recording event for the recorder. If a configuration recorder is not specified, this action returns the status of all configuration recorders associated with the account. >You can specify only one configuration recorder for each Amazon Web Services Region for each account. For a detailed status of recording events over time, add your Config events to Amazon CloudWatch metrics and use CloudWatch metrics.
  */
  describeConfigurationRecorderStatus(callback?: (err: AWSError, data: ConfigService.Types.DescribeConfigurationRecorderStatusResponse) => void): Request<ConfigService.Types.DescribeConfigurationRecorderStatusResponse, AWSError>;
  /**
- * Returns the details for the specified configuration recorders. If the configuration recorder is not specified, this action returns the details for all configuration recorders associated with the account. Currently, you can specify only one configuration recorder per region in your account.
+ * Returns the details for the specified configuration recorders. If the configuration recorder is not specified, this action returns the details for all configuration recorders associated with the account. You can specify only one configuration recorder for each Amazon Web Services Region for each account.
  */
  describeConfigurationRecorders(params: ConfigService.Types.DescribeConfigurationRecordersRequest, callback?: (err: AWSError, data: ConfigService.Types.DescribeConfigurationRecordersResponse) => void): Request<ConfigService.Types.DescribeConfigurationRecordersResponse, AWSError>;
  /**
- * Returns the details for the specified configuration recorders. If the configuration recorder is not specified, this action returns the details for all configuration recorders associated with the account. Currently, you can specify only one configuration recorder per region in your account.
+ * Returns the details for the specified configuration recorders. If the configuration recorder is not specified, this action returns the details for all configuration recorders associated with the account. You can specify only one configuration recorder for each Amazon Web Services Region for each account.
  */
  describeConfigurationRecorders(callback?: (err: AWSError, data: ConfigService.Types.DescribeConfigurationRecordersResponse) => void): Request<ConfigService.Types.DescribeConfigurationRecordersResponse, AWSError>;
  /**
@@ -576,11 +576,11 @@ declare class ConfigService extends Service {
  */
  putConfigurationAggregator(callback?: (err: AWSError, data: ConfigService.Types.PutConfigurationAggregatorResponse) => void): Request<ConfigService.Types.PutConfigurationAggregatorResponse, AWSError>;
  /**
- * Creates a new configuration recorder to record the selected resource configurations. You can use this action to change the role roleARN or the recordingGroup of an existing recorder. To change the role, call the action on the existing configuration recorder and specify a role. Currently, you can specify only one configuration recorder per region in your account. If ConfigurationRecorder does not have the recordingGroup parameter specified, the default is to record all supported resource types.
+ * Creates a new configuration recorder to record configuration changes for specified resource types. You can also use this action to change the roleARN or the recordingGroup of an existing recorder. For more information, see Managing the Configuration Recorder in the Config Developer Guide. You can specify only one configuration recorder for each Amazon Web Services Region for each account. If the configuration recorder does not have the recordingGroup field specified, the default is to record all supported resource types.
  */
  putConfigurationRecorder(params: ConfigService.Types.PutConfigurationRecorderRequest, callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
  /**
- * Creates a new configuration recorder to record the selected resource configurations. You can use this action to change the role roleARN or the recordingGroup of an existing recorder. To change the role, call the action on the existing configuration recorder and specify a role. Currently, you can specify only one configuration recorder per region in your account. If ConfigurationRecorder does not have the recordingGroup parameter specified, the default is to record all supported resource types.
+ * Creates a new configuration recorder to record configuration changes for specified resource types. You can also use this action to change the roleARN or the recordingGroup of an existing recorder. For more information, see Managing the Configuration Recorder in the Config Developer Guide. You can specify only one configuration recorder for each Amazon Web Services Region for each account. If the configuration recorder does not have the recordingGroup field specified, the default is to record all supported resource types.
  */
  putConfigurationRecorder(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
  /**
@@ -1476,15 +1476,15 @@ declare namespace ConfigService {
  export type ConfigurationItemStatus = "OK"|"ResourceDiscovered"|"ResourceNotRecorded"|"ResourceDeleted"|"ResourceDeletedNotRecorded"|string;
  export interface ConfigurationRecorder {
  /**
- * The name of the recorder. By default, Config automatically assigns the name "default" when creating the configuration recorder. You cannot change the assigned name.
+ * The name of the configuration recorder. Config automatically assigns the name of "default" when creating the configuration recorder. You cannot change the name of the configuration recorder after it has been created. To change the configuration recorder name, you must delete it and create a new configuration recorder with a new name.
  */
  name?: RecorderName;
  /**
- * Amazon Resource Name (ARN) of the IAM role used to describe the Amazon Web Services resources associated with the account. While the API model does not require this field, the server will reject a request without a defined roleARN for the configuration recorder.
+ * Amazon Resource Name (ARN) of the IAM role assumed by Config and used by the configuration recorder. While the API model does not require this field, the server will reject a request without a defined roleARN for the configuration recorder. Pre-existing Config role If you have used an Amazon Web Services service that uses Config, such as Security Hub or Control Tower, and an Config role has already been created, make sure that the IAM role that you use when setting up Config keeps the same minimum permissions as the already created Config role. You must do this so that the other Amazon Web Services service continues to run as expected. For example, if Control Tower has an IAM role that allows Config to read Amazon Simple Storage Service (Amazon S3) objects, make sure that the same permissions are granted within the IAM role you use when setting up Config. Otherwise, it may interfere with how Control Tower operates. For more information about IAM roles for Config, see Identity and Access Management for Config in the Config Developer Guide.
  */
  roleARN?: String;
  /**
- * Specifies the types of Amazon Web Services resources for which Config records configuration changes.
+ * Specifies which resource types Config records for configuration changes. High Number of Config Evaluations You may notice increased activity in your account during your initial month recording with Config when compared to subsequent months. During the initial bootstrapping process, Config runs evaluations on all the resources in your account that you have selected for Config to record. If you are running ephemeral workloads, you may see increased activity from Config as it records configuration changes associated with creating and deleting these temporary resources. An ephemeral workload is a temporary use of computing resources that are loaded and run when needed. Examples include Amazon Elastic Compute Cloud (Amazon EC2) Spot Instances, Amazon EMR jobs, and Auto Scaling. If you want to avoid the increased activity from running ephemeral workloads, you can run these types of workloads in a separate account with Config turned off to avoid increased configuration recording and rule evaluations.
  */
  recordingGroup?: RecordingGroup;
  }
@@ -2604,6 +2604,12 @@ declare namespace ConfigService {
  export type Evaluations = Evaluation[];
  export type EventSource = "aws.config"|string;
  export type ExcludedAccounts = AccountId[];
+ export interface ExclusionByResourceTypes {
+ /**
+ * A comma-separated list of resource types to exclude from recording by the configuration recorder.
+ */
+ resourceTypes?: ResourceTypeList;
+ }
  export interface ExecutionControls {
  /**
  * A SsmControls object.
@@ -3797,7 +3803,7 @@ declare namespace ConfigService {
  }
  export interface PutConfigurationRecorderRequest {
  /**
- * The configuration recorder object that records each configuration change made to the resources.
+ * An object for the configuration recorder to record configuration changes for specified resource types.
  */
  ConfigurationRecorder: ConfigurationRecorder;
  }
@@ -4044,18 +4050,33 @@
  export type RecorderStatus = "Pending"|"Success"|"Failure"|string;
  export interface RecordingGroup {
  /**
- * Specifies whether Config records configuration changes for every supported type of regional resource. If you set this option to true, when Config adds support for a new type of regional resource, it starts recording resources of that type automatically. If you set this option to true, you cannot enumerate a list of resourceTypes.
+ * Specifies whether Config records configuration changes for all supported regional resource types. If you set this field to true, when Config adds support for a new type of regional resource, Config starts recording resources of that type automatically. If you set this field to true, you cannot enumerate specific resource types to record in the resourceTypes field of RecordingGroup, or to exclude in the resourceTypes field of ExclusionByResourceTypes.
  */
  allSupported?: AllSupported;
  /**
- * Specifies whether Config includes all supported types of global resources (for example, IAM resources) with the resources that it records. Before you can set this option to true, you must set the allSupported option to true. If you set this option to true, when Config adds support for a new type of global resource, it starts recording resources of that type automatically. The configuration details for any global resource are the same in all regions. To prevent duplicate configuration items, you should consider customizing Config in only one region to record global resources.
+ * Specifies whether Config records configuration changes for all supported global resources. Before you set this field to true, set the allSupported field of RecordingGroup to true. Optionally, you can set the useOnly field of RecordingStrategy to ALL_SUPPORTED_RESOURCE_TYPES. If you set this field to true, when Config adds support for a new type of global resource in the Region where you set up the configuration recorder, Config starts recording resources of that type automatically. If you set this field to false but list global resource types in the resourceTypes field of RecordingGroup, Config will still record configuration changes for those specified resource types regardless of if you set the includeGlobalResourceTypes field to false. If you do not want to record configuration changes to global resource types, make sure to not list them in the resourceTypes field in addition to setting the includeGlobalResourceTypes field to false.
  */
  includeGlobalResourceTypes?: IncludeGlobalResourceTypes;
  /**
- * A comma-separated list that specifies the types of Amazon Web Services resources for which Config records configuration changes (for example, AWS::EC2::Instance or AWS::CloudTrail::Trail). To record all configuration changes, you must set the allSupported option to true. If you set the AllSupported option to false and populate the ResourceTypes option with values, when Config adds support for a new type of resource, it will not record resources of that type unless you manually add that type to your recording group. For a list of valid resourceTypes values, see the resourceType Value column in Supported Amazon Web Services resource Types.
+ * A comma-separated list that specifies which resource types Config records. Optionally, you can set the useOnly field of RecordingStrategy to INCLUSION_BY_RESOURCE_TYPES. To record all configuration changes, set the allSupported field of RecordingGroup to true, and either omit this field or don't specify any resource types in this field. If you set the allSupported field to false and specify values for resourceTypes, when Config adds support for a new type of resource, it will not record resources of that type unless you manually add that type to your recording group. For a list of valid resourceTypes values, see the Resource Type Value column in Supported Amazon Web Services resource Types in the Config developer guide. Region Availability Before specifying a resource type for Config to track, check Resource Coverage by Region Availability to see if the resource type is supported in the Amazon Web Services Region where you set up Config. If a resource type is supported by Config in at least one Region, you can enable the recording of that resource type in all Regions supported by Config, even if the specified resource type is not supported in the Amazon Web Services Region where you set up Config.
  */
  resourceTypes?: ResourceTypeList;
+ /**
+ * An object that specifies how Config excludes resource types from being recorded by the configuration recorder. To use this option, you must set the useOnly field of RecordingStrategy to EXCLUSION_BY_RESOURCE_TYPES.
+ */
+ exclusionByResourceTypes?: ExclusionByResourceTypes;
+ /**
+ * An object that specifies the recording strategy for the configuration recorder. If you set the useOnly field of RecordingStrategy to ALL_SUPPORTED_RESOURCE_TYPES, Config records configuration changes for all supported regional resource types. You also must set the allSupported field of RecordingGroup to true. When Config adds support for a new type of regional resource, Config automatically starts recording resources of that type. If you set the useOnly field of RecordingStrategy to INCLUSION_BY_RESOURCE_TYPES, Config records configuration changes for only the resource types you specify in the resourceTypes field of RecordingGroup. If you set the useOnly field of RecordingStrategy to EXCLUSION_BY_RESOURCE_TYPES, Config records configuration changes for all supported resource types except the resource types that you specify as exemptions to exclude from being recorded in the resourceTypes field of ExclusionByResourceTypes. The recordingStrategy field is optional when you set the allSupported field of RecordingGroup to true. The recordingStrategy field is optional when you list resource types in the resourceTypes field of RecordingGroup. The recordingStrategy field is required if you list resource types to exclude from recording in the resourceTypes field of ExclusionByResourceTypes. If you choose EXCLUSION_BY_RESOURCE_TYPES for the recording strategy, the exclusionByResourceTypes field will override other properties in the request. For example, even if you set includeGlobalResourceTypes to false, global resource types will still be automatically recorded in this option unless those resource types are specifically listed as exemptions in the resourceTypes field of exclusionByResourceTypes. By default, if you choose the EXCLUSION_BY_RESOURCE_TYPES recording strategy, when Config adds support for a new resource type in the Region where you set up the configuration recorder, including global resource types, Config starts recording resources of that type automatically.
+ */
+ recordingStrategy?: RecordingStrategy;
+ }
+ export interface RecordingStrategy {
+ /**
+ * The recording strategy for the configuration recorder. If you set this option to ALL_SUPPORTED_RESOURCE_TYPES, Config records configuration changes for all supported regional resource types. You also must set the allSupported field of RecordingGroup to true. When Config adds support for a new type of regional resource, Config automatically starts recording resources of that type. For a list of supported resource types, see Supported Resource Types in the Config developer guide. If you set this option to INCLUSION_BY_RESOURCE_TYPES, Config records configuration changes for only the resource types that you specify in the resourceTypes field of RecordingGroup. If you set this option to EXCLUSION_BY_RESOURCE_TYPES, Config records configuration changes for all supported resource types, except the resource types that you specify as exemptions to exclude from being recorded in the resourceTypes field of ExclusionByResourceTypes. The recordingStrategy field is optional when you set the allSupported field of RecordingGroup to true. The recordingStrategy field is optional when you list resource types in the resourceTypes field of RecordingGroup. The recordingStrategy field is required if you list resource types to exclude from recording in the resourceTypes field of ExclusionByResourceTypes. If you choose EXCLUSION_BY_RESOURCE_TYPES for the recording strategy, the exclusionByResourceTypes field will override other properties in the request. For example, even if you set includeGlobalResourceTypes to false, global resource types will still be automatically recorded in this option unless those resource types are specifically listed as exemptions in the resourceTypes field of exclusionByResourceTypes. By default, if you choose the EXCLUSION_BY_RESOURCE_TYPES recording strategy, when Config adds support for a new resource type in the Region where you set up the configuration recorder, including global resource types, Config starts recording resources of that type automatically.
+ */
+ useOnly?: RecordingStrategyType;
  }
+ export type RecordingStrategyType = "ALL_SUPPORTED_RESOURCE_TYPES"|"INCLUSION_BY_RESOURCE_TYPES"|"EXCLUSION_BY_RESOURCE_TYPES"|string;
  export type ReevaluateConfigRuleNames = ConfigRuleName[];
  export type RelatedEvent = string;
  export type RelatedEventList = RelatedEvent[];
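
Taken together, the new `RecordingStrategy`, `ExclusionByResourceTypes`, and `RecordingGroup` fields let a recorder run in exclusion mode. A minimal sketch of `putConfigurationRecorder` using only the shapes shown in this diff; the role ARN and excluded resource types are placeholders:

```ts
import AWS from 'aws-sdk';

const config = new AWS.ConfigService({ region: 'us-east-1' }); // placeholder region

// Record everything Config supports EXCEPT the listed resource types.
// Per the docs above, recordingStrategy is required when exclusions are
// listed, and EXCLUSION_BY_RESOURCE_TYPES overrides includeGlobalResourceTypes.
async function putRecorder(): Promise<void> {
  await config
    .putConfigurationRecorder({
      ConfigurationRecorder: {
        name: 'default',
        roleARN: 'arn:aws:iam::123456789012:role/config-role', // placeholder
        recordingGroup: {
          recordingStrategy: { useOnly: 'EXCLUSION_BY_RESOURCE_TYPES' },
          exclusionByResourceTypes: {
            resourceTypes: ['AWS::EC2::Volume'], // placeholder exclusions
          },
        },
      },
    })
    .promise();
}
```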
package/clients/frauddetector.d.ts CHANGED
@@ -1572,6 +1572,12 @@ declare namespace FraudDetector {
  }
  export type EventAttributeMap = {[key: string]: attributeValue};
  export type EventIngestion = "ENABLED"|"DISABLED"|string;
+ export interface EventOrchestration {
+ /**
+ * Specifies if event orchestration is enabled through Amazon EventBridge.
+ */
+ eventBridgeEnabled: Boolean;
+ }
  export interface EventPredictionSummary {
  /**
  * The event ID.
@@ -1640,6 +1646,10 @@
  * The entity type ARN.
  */
  arn?: fraudDetectorArn;
+ /**
+ * The event orchestration status.
+ */
+ eventOrchestration?: EventOrchestration;
  }
  export type EventVariableMap = {[key: string]: variableValue};
  export interface EventVariableSummary {
@@ -2447,7 +2457,7 @@
  */
  labelMapper?: labelMapper;
  /**
- * The action to take for unlabeled events. Use IGNORE if you want the unlabeled events to be ignored. This is recommended when the majority of the events in the dataset are labeled. Use FRAUD if you want to categorize all unlabeled events as “Fraud”. This is recommended when most of the events in your dataset are fraudulent. Use LEGIT f you want to categorize all unlabeled events as “Legit”. This is recommended when most of the events in your dataset are legitimate. Use AUTO if you want Amazon Fraud Detector to decide how to use the unlabeled data. This is recommended when there is significant unlabeled events in the dataset. By default, Amazon Fraud Detector ignores the unlabeled data.
+ * The action to take for unlabeled events. Use IGNORE if you want the unlabeled events to be ignored. This is recommended when the majority of the events in the dataset are labeled. Use FRAUD if you want to categorize all unlabeled events as “Fraud”. This is recommended when most of the events in your dataset are fraudulent. Use LEGIT if you want to categorize all unlabeled events as “Legit”. This is recommended when most of the events in your dataset are legitimate. Use AUTO if you want Amazon Fraud Detector to decide how to use the unlabeled data. This is recommended when there is significant unlabeled events in the dataset. By default, Amazon Fraud Detector ignores the unlabeled data.
  */
  unlabeledEventsTreatment?: UnlabeledEventsTreatment;
  }
@@ -2888,13 +2898,17 @@
  */
  entityTypes: NonEmptyListOfStrings;
  /**
- * Specifies if ingenstion is enabled or disabled.
+ * Specifies if ingestion is enabled or disabled.
  */
  eventIngestion?: EventIngestion;
  /**
  * A collection of key and value pairs.
  */
  tags?: tagList;
+ /**
+ * Enables or disables event orchestration. If enabled, you can send event predictions to select AWS services for downstream processing of the events.
+ */
+ eventOrchestration?: EventOrchestration;
  }
  export interface PutEventTypeResult {
  }
@@ -2948,7 +2962,7 @@
  */
  description?: description;
  /**
- *
+ * A collection of key and value pairs.
  */
  tags?: tagList;
  }
@@ -3198,7 +3212,7 @@
  */
  lowerBoundValue: float;
  /**
- * The lower bound value of the area under curve (auc).
+ * The upper bound value of the area under curve (auc).
  */
  upperBoundValue: float;
  }
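
The new `eventOrchestration` flag rides along on `putEventType`. A sketch under the assumption that the event type's other required fields (`name`, `eventVariables`, `entityTypes` in the existing API) are already defined elsewhere; all values below are placeholders:

```ts
import AWS from 'aws-sdk';

const fraud = new AWS.FraudDetector({ region: 'us-east-1' }); // placeholder region

async function enableOrchestration(): Promise<void> {
  await fraud
    .putEventType({
      name: 'sample_registration',             // placeholder event type
      eventVariables: ['ip_address', 'email'], // placeholder variables
      entityTypes: ['sample_customer'],        // placeholder entity type
      eventIngestion: 'ENABLED',
      // New in this release: forward event predictions to EventBridge.
      eventOrchestration: { eventBridgeEnabled: true },
    })
    .promise();
}
```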
package/clients/glue.d.ts CHANGED
@@ -3965,11 +3965,11 @@ declare namespace Glue {
3965
3965
  */
3966
3966
  Command: JobCommand;
3967
3967
  /**
3968
- * The default arguments for this job. You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue itself consumes. Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue Connection, Secrets Manager or other secret management mechanism if you intend to keep them within the Job. For information about how to specify and consume your own Job arguments, see the Calling Glue APIs in Python topic in the developer guide. For information about the key-value pairs that Glue consumes to set up your job, see the Special Parameters Used by Glue topic in the developer guide.
3968
+ * The default arguments for every run of this job, specified as name-value pairs. You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue itself consumes. Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue Connection, Secrets Manager or other secret management mechanism if you intend to keep them within the Job. For information about how to specify and consume your own Job arguments, see the Calling Glue APIs in Python topic in the developer guide. For information about the arguments you can provide to this field when configuring Spark jobs, see the Special Parameters Used by Glue topic in the developer guide. For information about the arguments you can provide to this field when configuring Ray jobs, see Using job parameters in Ray jobs in the developer guide.
3969
3969
  */
3970
3970
  DefaultArguments?: GenericMap;
3971
3971
  /**
3972
- * Non-overridable arguments for this job, specified as name-value pairs.
3972
+ * Arguments for this job that are not overridden when providing job arguments in a job run, specified as name-value pairs.
3973
3973
  */
3974
3974
  NonOverridableArguments?: GenericMap;
3975
3975
  /**
@@ -3989,7 +3989,7 @@ declare namespace Glue {
3989
3989
  */
3990
3990
  Timeout?: Timeout;
3991
3991
  /**
3992
- * For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page. Do not set Max Capacity if using WorkerType and NumberOfWorkers. The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job: When you specify a Python shell job (JobCommand.Name="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU. When you specify an Apache Spark ETL job (JobCommand.Name="glueetl") or Apache Spark streaming ETL job (JobCommand.Name="gluestreaming"), you can allocate a minimum of 2 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation. For Glue version 2.0 jobs, you cannot instead specify a Maximum capacity. Instead, you should specify a Worker type and the Number of workers.
3992
+ * For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page. For Glue version 2.0+ jobs, you cannot specify a Maximum capacity. Instead, you should specify a Worker type and the Number of workers. Do not set MaxCapacity if using WorkerType and NumberOfWorkers. The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job, an Apache Spark ETL job, or an Apache Spark streaming ETL job: When you specify a Python shell job (JobCommand.Name="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU. When you specify an Apache Spark ETL job (JobCommand.Name="glueetl") or Apache Spark streaming ETL job (JobCommand.Name="gluestreaming"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
3993
3993
  */
3994
3994
  MaxCapacity?: NullableDouble;
3995
3995
  /**
@@ -4005,7 +4005,7 @@ declare namespace Glue {
4005
4005
  */
4006
4006
  NotificationProperty?: NotificationProperty;
4007
4007
  /**
4008
- * Glue version determines the versions of Apache Spark and Python that Glue supports. The Python version indicates the version supported for jobs of type Spark. For more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide. Jobs that are created without specifying a Glue version default to Glue 0.9.
4008
+ * In Spark jobs, GlueVersion determines the versions of Apache Spark and Python that Glue available in a job. The Python version indicates the version supported for jobs of type Spark. Ray jobs should set GlueVersion to 4.0 or greater. However, the versions of Ray, Python and additional libraries available in your Ray job are determined by the Runtime parameter of the Job command. For more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide. Jobs that are created without specifying a Glue version default to Glue 0.9.
4009
4009
  */
4010
4010
  GlueVersion?: GlueVersionString;
4011
4011
  /**
@@ -4013,7 +4013,7 @@ declare namespace Glue {
4013
4013
  */
4014
4014
  NumberOfWorkers?: NullableInteger;
4015
4015
  /**
4016
- * The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X. For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker. For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
4016
+ * The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker. For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPU, 64 GB of m emory, 128 GB disk), and provides up to 8 Ray workers based on the autoscaler.
4017
4017
  */
4018
4018
  WorkerType?: WorkerType;
4019
4019
  /**
@@ -8235,11 +8235,11 @@ declare namespace Glue {
8235
8235
  */
8236
8236
  Command?: JobCommand;
8237
8237
  /**
8238
- * The default arguments for this job, specified as name-value pairs. You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue itself consumes. For information about how to specify and consume your own Job arguments, see the Calling Glue APIs in Python topic in the developer guide. For information about the key-value pairs that Glue consumes to set up your job, see the Special Parameters Used by Glue topic in the developer guide.
8238
+ * The default arguments for every run of this job, specified as name-value pairs. You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue itself consumes. Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue Connection, Secrets Manager or other secret management mechanism if you intend to keep them within the Job. For information about how to specify and consume your own Job arguments, see the Calling Glue APIs in Python topic in the developer guide. For information about the arguments you can provide to this field when configuring Spark jobs, see the Special Parameters Used by Glue topic in the developer guide. For information about the arguments you can provide to this field when configuring Ray jobs, see Using job parameters in Ray jobs in the developer guide.
8239
8239
  */
8240
8240
  DefaultArguments?: GenericMap;
8241
8241
  /**
8242
- * Non-overridable arguments for this job, specified as name-value pairs.
8242
+ * Arguments for this job that are not overridden when providing job arguments in a job run, specified as name-value pairs.
8243
8243
  */
8244
8244
  NonOverridableArguments?: GenericMap;
8245
8245
  /**
@@ -8259,11 +8259,11 @@ declare namespace Glue {
8259
8259
  */
8260
8260
  Timeout?: Timeout;
8261
8261
  /**
8262
- * For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page. For Glue version 2.0 or later jobs, you cannot specify a Maximum capacity. Instead, you should specify a Worker type and the Number of workers. Do not set MaxCapacity if using WorkerType and NumberOfWorkers. The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job, an Apache Spark ETL job, or an Apache Spark streaming ETL job: When you specify a Python shell job (JobCommand.Name="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU. When you specify an Apache Spark ETL job (JobCommand.Name="glueetl") or Apache Spark streaming ETL job (JobCommand.Name="gluestreaming"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
8262
+ * For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page. For Glue version 2.0 or later jobs, you cannot specify a Maximum capacity. Instead, you should specify a Worker type and the Number of workers. Do not set MaxCapacity if using WorkerType and NumberOfWorkers. The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job, an Apache Spark ETL job, or an Apache Spark streaming ETL job: When you specify a Python shell job (JobCommand.Name="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU. When you specify an Apache Spark ETL job (JobCommand.Name="glueetl") or Apache Spark streaming ETL job (JobCommand.Name="gluestreaming"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
8263
8263
  */
8264
8264
  MaxCapacity?: NullableDouble;
8265
8265
  /**
8266
- * The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X. For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker. For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPU, 64 GB of memory, 256 GB disk), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later jobs. For the G.8X worker type, each worker maps to 8 DPU (32 vCPU, 128 GB of memory, 512 GB disk), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later jobs. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
8266
+ * The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, G.4X, G.8X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker. For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPU, 64 GB of memory, 256 GB disk), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPU, 128 GB of memory, 512 GB disk), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPU, 64 GB of m emory, 128 GB disk), and provides a default of 8 Ray workers (1 per vCPU).
8267
8267
  */
8268
8268
  WorkerType?: WorkerType;
8269
8269
  /**
@@ -8279,7 +8279,7 @@ declare namespace Glue {
8279
8279
  */
8280
8280
  NotificationProperty?: NotificationProperty;
8281
8281
  /**
8282
- * Glue version determines the versions of Apache Spark and Python that Glue supports. The Python version indicates the version supported for jobs of type Spark. For more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide. Jobs that are created without specifying a Glue version default to Glue 0.9.
8282
+ * In Spark jobs, GlueVersion determines the versions of Apache Spark and Python that Glue available in a job. The Python version indicates the version supported for jobs of type Spark. Ray jobs should set GlueVersion to 4.0 or greater. However, the versions of Ray, Python and additional libraries available in your Ray job are determined by the Runtime parameter of the Job command. For more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide. Jobs that are created without specifying a Glue version default to Glue 0.9.
8283
8283
  */
8284
8284
  GlueVersion?: GlueVersionString;
8285
8285
  /**
@@ -8338,7 +8338,7 @@ declare namespace Glue {
8338
8338
  export type JobBookmarksEncryptionMode = "DISABLED"|"CSE-KMS"|string;
8339
8339
  export interface JobCommand {
8340
8340
  /**
8341
- * The name of the job command. For an Apache Spark ETL job, this must be glueetl. For a Python shell job, it must be pythonshell. For an Apache Spark streaming ETL job, this must be gluestreaming.
8341
+ * The name of the job command. For an Apache Spark ETL job, this must be glueetl. For a Python shell job, it must be pythonshell. For an Apache Spark streaming ETL job, this must be gluestreaming. For a Ray job, this must be glueray.
8342
8342
  */
8343
8343
  Name?: GenericString;
8344
8344
  /**
@@ -8349,6 +8349,10 @@ declare namespace Glue {
8349
8349
  * The Python version being used to run a Python shell job. Allowed values are 2 or 3.
8350
8350
  */
8351
8351
  PythonVersion?: PythonVersionString;
8352
+ /**
8353
+ * In Ray jobs, Runtime is used to specify the versions of Ray, Python and additional libraries available in your environment. This field is not used in other job types. For supported runtime environment values, see Working with Ray jobs in the Glue Developer Guide.
8354
+ */
8355
+ Runtime?: RuntimeNameString;
8352
8356
  }
8353
8357
  export type JobList = Job[];
8354
8358
  export type JobName = string;
@@ -8397,7 +8401,7 @@ declare namespace Glue {
8397
8401
  */
8398
8402
  JobRunState?: JobRunState;
8399
8403
  /**
8400
- * The job arguments associated with this run. For this job run, they replace the default arguments set in the job definition itself. You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue itself consumes. For information about how to specify and consume your own job arguments, see the Calling Glue APIs in Python topic in the developer guide. For information about the key-value pairs that Glue consumes to set up your job, see the Special Parameters Used by Glue topic in the developer guide.
8404
+ * The job arguments associated with this run. For this job run, they replace the default arguments set in the job definition itself. You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue itself consumes. Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue Connection, Secrets Manager or other secret management mechanism if you intend to keep them within the Job. For information about how to specify and consume your own Job arguments, see the Calling Glue APIs in Python topic in the developer guide. For information about the arguments you can provide to this field when configuring Spark jobs, see the Special Parameters Used by Glue topic in the developer guide. For information about the arguments you can provide to this field when configuring Ray jobs, see Using job parameters in Ray jobs in the developer guide.
8401
8405
  */
8402
8406
  Arguments?: GenericMap;
8403
8407
  /**
@@ -8421,11 +8425,11 @@ declare namespace Glue {
8421
8425
  */
8422
8426
  Timeout?: Timeout;
8423
8427
  /**
8424
- * The number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page. Do not set Max Capacity if using WorkerType and NumberOfWorkers. The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job: When you specify a Python shell job (JobCommand.Name="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU. When you specify an Apache Spark ETL job (JobCommand.Name="glueetl"), you can allocate a minimum of 2 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
8428
+ * For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page. For Glue version 2.0+ jobs, you cannot specify a Maximum capacity. Instead, you should specify a Worker type and the Number of workers. Do not set MaxCapacity if using WorkerType and NumberOfWorkers. The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job, an Apache Spark ETL job, or an Apache Spark streaming ETL job: When you specify a Python shell job (JobCommand.Name="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU. When you specify an Apache Spark ETL job (JobCommand.Name="glueetl") or Apache Spark streaming ETL job (JobCommand.Name="gluestreaming"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
8425
8429
  */
8426
8430
  MaxCapacity?: NullableDouble;
8427
8431
  /**
8428
- * The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X. For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker. For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker. For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
8432
+ * The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker. For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPU, 64 GB of m emory, 128 GB disk), and provides up to 8 Ray workers (one per vCPU) based on the autoscaler.
8429
8433
  */
8430
8434
  WorkerType?: WorkerType;
8431
8435
  /**
@@ -8445,7 +8449,7 @@ declare namespace Glue {
8445
8449
  */
8446
8450
  NotificationProperty?: NotificationProperty;
8447
8451
  /**
8448
- * Glue version determines the versions of Apache Spark and Python that Glue supports. The Python version indicates the version supported for jobs of type Spark. For more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide. Jobs that are created without specifying a Glue version default to Glue 0.9.
8452
+ * In Spark jobs, GlueVersion determines the versions of Apache Spark and Python that Glue available in a job. The Python version indicates the version supported for jobs of type Spark. Ray jobs should set GlueVersion to 4.0 or greater. However, the versions of Ray, Python and additional libraries available in your Ray job are determined by the Runtime parameter of the Job command. For more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide. Jobs that are created without specifying a Glue version default to Glue 0.9.
8449
8453
  */
8450
8454
  GlueVersion?: GlueVersionString;
8451
8455
  /**
@@ -8481,11 +8485,11 @@ declare namespace Glue {
8481
8485
  */
8482
8486
  Command?: JobCommand;
8483
8487
  /**
8484
- * The default arguments for this job. You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue itself consumes. For information about how to specify and consume your own Job arguments, see the Calling Glue APIs in Python topic in the developer guide. For information about the key-value pairs that Glue consumes to set up your job, see the Special Parameters Used by Glue topic in the developer guide.
8488
+ * The default arguments for every run of this job, specified as name-value pairs. You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue itself consumes. Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue Connection, Secrets Manager or other secret management mechanism if you intend to keep them within the Job. For information about how to specify and consume your own Job arguments, see the Calling Glue APIs in Python topic in the developer guide. For information about the arguments you can provide to this field when configuring Spark jobs, see the Special Parameters Used by Glue topic in the developer guide. For information about the arguments you can provide to this field when configuring Ray jobs, see Using job parameters in Ray jobs in the developer guide.
8485
8489
  */
8486
8490
  DefaultArguments?: GenericMap;
8487
8491
  /**
8488
- * Non-overridable arguments for this job, specified as name-value pairs.
8492
+ * Arguments for this job that are not overridden when providing job arguments in a job run, specified as name-value pairs.
8489
8493
  */
8490
8494
  NonOverridableArguments?: GenericMap;
  /**
@@ -8505,11 +8509,11 @@ declare namespace Glue {
  */
  Timeout?: Timeout;
  /**
- * For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page. Do not set Max Capacity if using WorkerType and NumberOfWorkers. The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job: When you specify a Python shell job (JobCommand.Name="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU. When you specify an Apache Spark ETL job (JobCommand.Name="glueetl") or Apache Spark streaming ETL job (JobCommand.Name="gluestreaming"), you can allocate a minimum of 2 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation. For Glue version 2.0 jobs, you cannot instead specify a Maximum capacity. Instead, you should specify a Worker type and the Number of workers.
+ * For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page. For Glue version 2.0+ jobs, you cannot specify a Maximum capacity. Instead, you should specify a Worker type and the Number of workers. Do not set MaxCapacity if using WorkerType and NumberOfWorkers. The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job, an Apache Spark ETL job, or an Apache Spark streaming ETL job: When you specify a Python shell job (JobCommand.Name="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU. When you specify an Apache Spark ETL job (JobCommand.Name="glueetl") or Apache Spark streaming ETL job (JobCommand.Name="gluestreaming"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
  */
  MaxCapacity?: NullableDouble;
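A short sketch of the two mutually exclusive capacity models described above; the request fragments are illustrative, not complete job definitions, and the script paths are placeholders.

```typescript
import { Glue } from "aws-sdk";

// Legacy sizing (Glue 1.0 or earlier, or Python shell): MaxCapacity in DPUs.
const pythonShellSizing: Partial<Glue.Types.CreateJobRequest> = {
  Command: { Name: "pythonshell", ScriptLocation: "s3://my-bucket/shell.py" },
  MaxCapacity: 0.0625, // fractional DPU is only valid for pythonshell
};

// Glue 2.0+ Spark sizing: WorkerType + NumberOfWorkers, never MaxCapacity.
const sparkSizing: Partial<Glue.Types.CreateJobRequest> = {
  GlueVersion: "4.0",
  Command: { Name: "glueetl", ScriptLocation: "s3://my-bucket/etl.py" },
  WorkerType: "G.1X",
  NumberOfWorkers: 10, // ~10 DPUs at 1 DPU per G.1X worker
};
```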
  /**
- * The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X. For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker. For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
+ * The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker. For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8 vCPU, 64 GB of memory, 128 GB disk), and provides up to 8 Ray workers based on the autoscaler.
  */
  WorkerType?: WorkerType;
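A quick capacity cross-check under the DPU-per-worker mappings quoted above (arithmetic only; the per-worker figures are taken from the doc text, not measured):

```typescript
// Rough DPU math from the mappings above: G.1X = 1, G.2X = 2, G.025X = 0.25.
const DPU_PER_WORKER: Record<string, number> = {
  "G.1X": 1,
  "G.2X": 2,
  "G.025X": 0.25,
};

const totalDpu = (workerType: string, numberOfWorkers: number): number =>
  (DPU_PER_WORKER[workerType] ?? NaN) * numberOfWorkers;

console.log(totalDpu("G.2X", 10)); // 20 DPUs for ten G.2X workers
```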
  /**
@@ -8525,7 +8529,7 @@ declare namespace Glue {
  */
  NotificationProperty?: NotificationProperty;
  /**
- * Glue version determines the versions of Apache Spark and Python that Glue supports. The Python version indicates the version supported for jobs of type Spark. For more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
+ * In Spark jobs, GlueVersion determines the versions of Apache Spark and Python that Glue makes available in a job. The Python version indicates the version supported for jobs of type Spark. Ray jobs should set GlueVersion to 4.0 or greater. However, the versions of Ray, Python, and additional libraries available in your Ray job are determined by the Runtime parameter of the Job command. For more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide. Jobs that are created without specifying a Glue version default to Glue 0.9.
  */
  GlueVersion?: GlueVersionString;
  /**
@@ -10457,6 +10461,7 @@ declare namespace Glue {
  */
  Id?: IntegerValue;
  }
+ export type RuntimeNameString = string;
  export interface S3CatalogDeltaSource {
  /**
  * The name of the Delta Lake data source.
@@ -11689,7 +11694,7 @@ declare namespace Glue {
  */
  JobRunId?: IdString;
  /**
- * The job arguments specifically for this run. For this job run, they replace the default arguments set in the job definition itself. You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue itself consumes. Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue Connection, Secrets Manager or other secret management mechanism if you intend to keep them within the Job. For information about how to specify and consume your own Job arguments, see the Calling Glue APIs in Python topic in the developer guide. For information about the key-value pairs that Glue consumes to set up your job, see the Special Parameters Used by Glue topic in the developer guide.
+ * The job arguments associated with this run. For this job run, they replace the default arguments set in the job definition itself. You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue itself consumes. Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue Connection, Secrets Manager or other secret management mechanism if you intend to keep them within the Job. For information about how to specify and consume your own Job arguments, see the Calling Glue APIs in Python topic in the developer guide. For information about the arguments you can provide to this field when configuring Spark jobs, see the Special Parameters Used by Glue topic in the developer guide. For information about the arguments you can provide to this field when configuring Ray jobs, see Using job parameters in Ray jobs in the developer guide.
  */
  Arguments?: GenericMap;
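Given that per-run Arguments fully replace the job's DefaultArguments for that run, a start-run sketch follows, reusing the glue client and the hypothetical job and argument key from the sketches above:

```typescript
// Sketch: Arguments here replace the job's DefaultArguments for this run only;
// NonOverridableArguments still win over anything passed here.
glue.startJobRun({
  JobName: "my-etl-job",                              // hypothetical job from the sketch above
  Arguments: {
    "--db_secret_name": "staging/warehouse/password", // illustrative per-run override
  },
}, (err, data) => {
  if (err) console.error(err);
  else console.log("Started run:", data.JobRunId);
});
```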
  /**
@@ -11701,7 +11706,7 @@ declare namespace Glue {
  */
  Timeout?: Timeout;
  /**
- * The number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page. Do not set Max Capacity if using WorkerType and NumberOfWorkers. The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job, or an Apache Spark ETL job: When you specify a Python shell job (JobCommand.Name="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU. When you specify an Apache Spark ETL job (JobCommand.Name="glueetl"), you can allocate a minimum of 2 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
+ * For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page. For Glue version 2.0+ jobs, you cannot specify a Maximum capacity. Instead, you should specify a Worker type and the Number of workers. Do not set MaxCapacity if using WorkerType and NumberOfWorkers. The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job, an Apache Spark ETL job, or an Apache Spark streaming ETL job: When you specify a Python shell job (JobCommand.Name="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU. When you specify an Apache Spark ETL job (JobCommand.Name="glueetl") or Apache Spark streaming ETL job (JobCommand.Name="gluestreaming"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
  */
  MaxCapacity?: NullableDouble;
  /**
@@ -11713,7 +11718,7 @@ declare namespace Glue {
  */
  NotificationProperty?: NotificationProperty;
  /**
- * The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X. For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker. For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker. For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
+ * The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker. For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs. For the Z.2X worker type, each worker maps to 2 DPU (8 vCPU, 64 GB of memory, 128 GB disk), and provides up to 8 Ray workers (one per vCPU) based on the autoscaler.
  */
  WorkerType?: WorkerType;
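Capacity can also be overridden for a single run through the same two mutually exclusive models; a sketch continuing the hypothetical jobs above, again reusing the glue client:

```typescript
// Sketch: override worker sizing for one run of a Glue 2.0+ Spark job.
glue.startJobRun({
  JobName: "my-etl-job",   // hypothetical job from the sketches above
  WorkerType: "G.2X",      // per-run override; do not also set MaxCapacity
  NumberOfWorkers: 20,     // ~40 DPUs at 2 DPU per G.2X worker
}, (err, data) => {
  if (err) console.error(err);
  else console.log("Started run:", data.JobRunId);
});
```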
  /**
@@ -13290,7 +13295,7 @@ declare namespace Glue {
  export type VersionString = string;
  export type VersionsString = string;
  export type ViewTextString = string;
- export type WorkerType = "Standard"|"G.1X"|"G.2X"|"G.025X"|"G.4X"|"G.8X"|string;
+ export type WorkerType = "Standard"|"G.1X"|"G.2X"|"G.025X"|"G.4X"|"G.8X"|"Z.2X"|string;
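Because WorkerType is an open union (it ends in |string), the compiler accepts arbitrary strings, so code that must branch on the new Ray-only value needs an explicit check; a small sketch:

```typescript
import { Glue } from "aws-sdk";

// The union is open-ended, so narrow explicitly rather than relying on
// exhaustiveness checking when handling the new Ray-only worker type.
const isRayWorkerType = (w: Glue.WorkerType | undefined): boolean => w === "Z.2X";
```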
  export interface Workflow {
  /**
  * The name of the workflow.
@@ -125,11 +125,11 @@ declare class GroundStation extends Service {
  */
  getDataflowEndpointGroup(callback?: (err: AWSError, data: GroundStation.Types.GetDataflowEndpointGroupResponse) => void): Request<GroundStation.Types.GetDataflowEndpointGroupResponse, AWSError>;
  /**
- * Returns the number of minutes used by account.
+ * Returns the number of reserved minutes used by the account.
  */
  getMinuteUsage(params: GroundStation.Types.GetMinuteUsageRequest, callback?: (err: AWSError, data: GroundStation.Types.GetMinuteUsageResponse) => void): Request<GroundStation.Types.GetMinuteUsageResponse, AWSError>;
  /**
- * Returns the number of minutes used by account.
+ * Returns the number of reserved minutes used by the account.
  */
  getMinuteUsage(callback?: (err: AWSError, data: GroundStation.Types.GetMinuteUsageResponse) => void): Request<GroundStation.Types.GetMinuteUsageResponse, AWSError>;
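A minimal usage sketch for the clarified operation; the region and the month/year values are placeholders, and the response is logged whole rather than assuming specific field names:

```typescript
import { GroundStation } from "aws-sdk";

const groundStation = new GroundStation({ region: "us-east-2" }); // placeholder region

// Sketch: fetch reserved-minute usage for one calendar month.
groundStation.getMinuteUsage({ month: 6, year: 2023 }, (err, data) => {
  if (err) console.error(err);
  else console.log("Minute usage:", data);
});
```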
  /**