aws-sdk 2.1386.0 → 2.1387.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +13 -1
- package/README.md +1 -1
- package/apis/chime-sdk-voice-2022-08-03.min.json +2 -1
- package/apis/glue-2017-03-31.min.json +510 -509
- package/apis/iotfleetwise-2021-06-17.min.json +86 -43
- package/apis/location-2020-11-19.min.json +23 -16
- package/apis/personalize-2018-05-22.min.json +32 -13
- package/apis/securityhub-2018-10-26.examples.json +140 -3
- package/apis/securityhub-2018-10-26.min.json +465 -158
- package/apis/securitylake-2018-05-10.min.json +457 -447
- package/apis/securitylake-2018-05-10.paginators.json +7 -7
- package/apis/wafv2-2019-07-29.min.json +168 -159
- package/clients/chimesdkvoice.d.ts +5 -0
- package/clients/glue.d.ts +29 -24
- package/clients/groundstation.d.ts +2 -2
- package/clients/iotfleetwise.d.ts +98 -13
- package/clients/location.d.ts +24 -8
- package/clients/memorydb.d.ts +2 -2
- package/clients/personalize.d.ts +34 -17
- package/clients/polly.d.ts +2 -2
- package/clients/securityhub.d.ts +539 -1
- package/clients/securitylake.d.ts +560 -541
- package/clients/wafv2.d.ts +16 -6
- package/dist/aws-sdk-core-react-native.js +1 -1
- package/dist/aws-sdk-react-native.js +10 -10
- package/dist/aws-sdk.js +58 -32
- package/dist/aws-sdk.min.js +30 -30
- package/lib/core.js +1 -1
- package/package.json +1 -1
package/clients/chimesdkvoice.d.ts CHANGED

```diff
@@ -872,6 +872,7 @@ declare namespace ChimeSDKVoice {
      */
     IsCaller?: Boolean;
   }
+  export type CallLegType = "Caller"|"Callee"|string;
   export type CallingName = string;
   export type CallingNameStatus = "Unassigned"|"UpdateInProgress"|"UpdateSucceeded"|"UpdateFailed"|string;
   export type CallingRegion = string;
@@ -2567,6 +2568,10 @@ declare namespace ChimeSDKVoice {
      * The unique identifier for the client request. Use a different token for different speaker search tasks.
      */
     ClientRequestToken?: ClientRequestId;
+    /**
+     * Specifies which call leg to stream for speaker search.
+     */
+    CallLeg?: CallLegType;
   }
   export interface StartSpeakerSearchTaskResponse {
     /**
```
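The new optional `CallLeg` field slots into an existing `startSpeakerSearchTask` call as shown below. A minimal sketch, assuming the 2.1387.0 typings above; the connector, transaction, and domain identifiers are placeholders:

```ts
import ChimeSDKVoice = require('aws-sdk/clients/chimesdkvoice');

const chime = new ChimeSDKVoice({ region: 'us-east-1' });

chime.startSpeakerSearchTask({
  VoiceConnectorId: 'abcdef1ghij2klmno3pqr4',  // placeholder
  TransactionId: 'example-transaction-id',     // placeholder
  VoiceProfileDomainId: 'example-domain-id',   // placeholder
  ClientRequestToken: 'unique-token-per-task', // use a different token per task
  CallLeg: 'Caller',                           // new in 2.1387.0: "Caller" | "Callee"
}, (err, data) => {
  if (err) console.error(err);
  else console.log(data.SpeakerSearchTask);
});
```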
package/clients/glue.d.ts CHANGED

```diff
@@ -3965,11 +3965,11 @@ declare namespace Glue {
      */
     Command: JobCommand;
     /**
-     * The default arguments for this job. You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue itself consumes. Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue Connection, Secrets Manager or other secret management mechanism if you intend to keep them within the Job. For information about how to specify and consume your own Job arguments, see the Calling Glue APIs in Python topic in the developer guide. For information about the
+     * The default arguments for every run of this job, specified as name-value pairs. You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue itself consumes. Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue Connection, Secrets Manager or other secret management mechanism if you intend to keep them within the Job. For information about how to specify and consume your own Job arguments, see the Calling Glue APIs in Python topic in the developer guide. For information about the arguments you can provide to this field when configuring Spark jobs, see the Special Parameters Used by Glue topic in the developer guide. For information about the arguments you can provide to this field when configuring Ray jobs, see Using job parameters in Ray jobs in the developer guide.
      */
     DefaultArguments?: GenericMap;
     /**
-     *
+     * Arguments for this job that are not overridden when providing job arguments in a job run, specified as name-value pairs.
      */
     NonOverridableArguments?: GenericMap;
     /**
@@ -3989,7 +3989,7 @@ declare namespace Glue {
      */
     Timeout?: Timeout;
     /**
-     * For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the
+     * For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page. For Glue version 2.0+ jobs, you cannot specify a Maximum capacity. Instead, you should specify a Worker type and the Number of workers. Do not set MaxCapacity if using WorkerType and NumberOfWorkers. The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job, an Apache Spark ETL job, or an Apache Spark streaming ETL job: When you specify a Python shell job (JobCommand.Name="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU. When you specify an Apache Spark ETL job (JobCommand.Name="glueetl") or Apache Spark streaming ETL job (JobCommand.Name="gluestreaming"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
      */
     MaxCapacity?: NullableDouble;
     /**
@@ -4005,7 +4005,7 @@ declare namespace Glue {
      */
     NotificationProperty?: NotificationProperty;
     /**
-     *
+     * In Spark jobs, GlueVersion determines the versions of Apache Spark and Python that Glue available in a job. The Python version indicates the version supported for jobs of type Spark. Ray jobs should set GlueVersion to 4.0 or greater. However, the versions of Ray, Python and additional libraries available in your Ray job are determined by the Runtime parameter of the Job command. For more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide. Jobs that are created without specifying a Glue version default to Glue 0.9.
      */
     GlueVersion?: GlueVersionString;
     /**
@@ -4013,7 +4013,7 @@ declare namespace Glue {
      */
     NumberOfWorkers?: NullableInteger;
     /**
-     * The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X. For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker. For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
+     * The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker. For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPU, 64 GB of m emory, 128 GB disk), and provides up to 8 Ray workers based on the autoscaler.
      */
     WorkerType?: WorkerType;
     /**
```
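The MaxCapacity and WorkerType notes above encode a real constraint: on Glue 2.0+ you size a job with `WorkerType` plus `NumberOfWorkers` and leave `MaxCapacity` unset. A minimal sketch of a Spark ETL job under those rules; the job name, role ARN, and script location are placeholders:

```ts
import Glue = require('aws-sdk/clients/glue');

const glue = new Glue({ region: 'us-east-1' });

// Glue 2.0+ Spark job: WorkerType + NumberOfWorkers, no MaxCapacity.
glue.createJob({
  Name: 'example-spark-etl',                        // placeholder
  Role: 'arn:aws:iam::123456789012:role/GlueRole',  // placeholder
  Command: {
    Name: 'glueetl',
    ScriptLocation: 's3://example-bucket/etl.py',   // placeholder
    PythonVersion: '3',
  },
  GlueVersion: '4.0',
  WorkerType: 'G.1X',      // each worker maps to 1 DPU
  NumberOfWorkers: 10,
  DefaultArguments: { '--job-language': 'python' },
}).promise().then(r => console.log('created job:', r.Name));
```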
```diff
@@ -8235,11 +8235,11 @@ declare namespace Glue {
      */
     Command?: JobCommand;
     /**
-     * The default arguments for this job, specified as name-value pairs. You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue itself consumes. For information about how to specify and consume your own Job arguments, see the Calling Glue APIs in Python topic in the developer guide. For information about the
+     * The default arguments for every run of this job, specified as name-value pairs. You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue itself consumes. Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue Connection, Secrets Manager or other secret management mechanism if you intend to keep them within the Job. For information about how to specify and consume your own Job arguments, see the Calling Glue APIs in Python topic in the developer guide. For information about the arguments you can provide to this field when configuring Spark jobs, see the Special Parameters Used by Glue topic in the developer guide. For information about the arguments you can provide to this field when configuring Ray jobs, see Using job parameters in Ray jobs in the developer guide.
      */
     DefaultArguments?: GenericMap;
     /**
-     *
+     * Arguments for this job that are not overridden when providing job arguments in a job run, specified as name-value pairs.
      */
     NonOverridableArguments?: GenericMap;
     /**
@@ -8259,11 +8259,11 @@ declare namespace Glue {
      */
     Timeout?: Timeout;
     /**
-     * For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the
+     * For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page. For Glue version 2.0 or later jobs, you cannot specify a Maximum capacity. Instead, you should specify a Worker type and the Number of workers. Do not set MaxCapacity if using WorkerType and NumberOfWorkers. The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job, an Apache Spark ETL job, or an Apache Spark streaming ETL job: When you specify a Python shell job (JobCommand.Name="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU. When you specify an Apache Spark ETL job (JobCommand.Name="glueetl") or Apache Spark streaming ETL job (JobCommand.Name="gluestreaming"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
      */
     MaxCapacity?: NullableDouble;
     /**
-     * The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X. For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker. For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPU, 64 GB of memory, 256 GB disk), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later jobs. For the G.8X worker type, each worker maps to 8 DPU (32 vCPU, 128 GB of memory, 512 GB disk), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later jobs. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
+     * The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, G.4X, G.8X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker. For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPU, 64 GB of memory, 256 GB disk), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPU, 128 GB of memory, 512 GB disk), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPU, 64 GB of m emory, 128 GB disk), and provides a default of 8 Ray workers (1 per vCPU).
      */
     WorkerType?: WorkerType;
     /**
@@ -8279,7 +8279,7 @@ declare namespace Glue {
      */
     NotificationProperty?: NotificationProperty;
     /**
-     *
+     * In Spark jobs, GlueVersion determines the versions of Apache Spark and Python that Glue available in a job. The Python version indicates the version supported for jobs of type Spark. Ray jobs should set GlueVersion to 4.0 or greater. However, the versions of Ray, Python and additional libraries available in your Ray job are determined by the Runtime parameter of the Job command. For more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide. Jobs that are created without specifying a Glue version default to Glue 0.9.
      */
     GlueVersion?: GlueVersionString;
     /**
@@ -8338,7 +8338,7 @@ declare namespace Glue {
   export type JobBookmarksEncryptionMode = "DISABLED"|"CSE-KMS"|string;
   export interface JobCommand {
     /**
-     * The name of the job command. For an Apache Spark ETL job, this must be glueetl. For a Python shell job, it must be pythonshell. For an Apache Spark streaming ETL job, this must be gluestreaming.
+     * The name of the job command. For an Apache Spark ETL job, this must be glueetl. For a Python shell job, it must be pythonshell. For an Apache Spark streaming ETL job, this must be gluestreaming. For a Ray job, this must be glueray.
      */
     Name?: GenericString;
     /**
@@ -8349,6 +8349,10 @@ declare namespace Glue {
      * The Python version being used to run a Python shell job. Allowed values are 2 or 3.
      */
     PythonVersion?: PythonVersionString;
+    /**
+     * In Ray jobs, Runtime is used to specify the versions of Ray, Python and additional libraries available in your environment. This field is not used in other job types. For supported runtime environment values, see Working with Ray jobs in the Glue Developer Guide.
+     */
+    Runtime?: RuntimeNameString;
   }
   export type JobList = Job[];
   export type JobName = string;
```
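Taken together, the additions above (Command.Name "glueray", the new `Runtime` field, GlueVersion 4.0+, and the Z.2X worker type defined later in this diff) are what creating a Ray job looks like. A hedged sketch; the runtime and Python version strings, job name, role, and script location are assumptions, and the supported runtime values live in "Working with Ray jobs":

```ts
import Glue = require('aws-sdk/clients/glue');

const glue = new Glue({ region: 'us-east-1' });

glue.createJob({
  Name: 'example-ray-job',                          // placeholder
  Role: 'arn:aws:iam::123456789012:role/GlueRole',  // placeholder
  Command: {
    Name: 'glueray',       // Ray jobs must use this command name
    Runtime: 'Ray2.4',     // assumption: see "Working with Ray jobs" for supported values
    PythonVersion: '3.9',  // assumption: Python version paired with the Ray runtime
    ScriptLocation: 's3://example-bucket/ray_script.py',  // placeholder
  },
  GlueVersion: '4.0',      // Ray jobs require 4.0 or greater
  WorkerType: 'Z.2X',      // 2 M-DPU per worker, up to 8 Ray workers via the autoscaler
  NumberOfWorkers: 5,
}).promise().then(r => console.log('created job:', r.Name));
```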
```diff
@@ -8397,7 +8401,7 @@ declare namespace Glue {
      */
     JobRunState?: JobRunState;
     /**
-     * The job arguments associated with this run. For this job run, they replace the default arguments set in the job definition itself. You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue itself consumes. For information about how to specify and consume your own
+     * The job arguments associated with this run. For this job run, they replace the default arguments set in the job definition itself. You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue itself consumes. Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue Connection, Secrets Manager or other secret management mechanism if you intend to keep them within the Job. For information about how to specify and consume your own Job arguments, see the Calling Glue APIs in Python topic in the developer guide. For information about the arguments you can provide to this field when configuring Spark jobs, see the Special Parameters Used by Glue topic in the developer guide. For information about the arguments you can provide to this field when configuring Ray jobs, see Using job parameters in Ray jobs in the developer guide.
      */
     Arguments?: GenericMap;
     /**
@@ -8421,11 +8425,11 @@ declare namespace Glue {
      */
     Timeout?: Timeout;
     /**
-     *
+     * For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page. For Glue version 2.0+ jobs, you cannot specify a Maximum capacity. Instead, you should specify a Worker type and the Number of workers. Do not set MaxCapacity if using WorkerType and NumberOfWorkers. The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job, an Apache Spark ETL job, or an Apache Spark streaming ETL job: When you specify a Python shell job (JobCommand.Name="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU. When you specify an Apache Spark ETL job (JobCommand.Name="glueetl") or Apache Spark streaming ETL job (JobCommand.Name="gluestreaming"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
      */
     MaxCapacity?: NullableDouble;
     /**
-     * The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X. For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker. For the G.1X worker type, each worker
+     * The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker. For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPU, 64 GB of m emory, 128 GB disk), and provides up to 8 Ray workers (one per vCPU) based on the autoscaler.
      */
     WorkerType?: WorkerType;
     /**
@@ -8445,7 +8449,7 @@ declare namespace Glue {
      */
     NotificationProperty?: NotificationProperty;
     /**
-     *
+     * In Spark jobs, GlueVersion determines the versions of Apache Spark and Python that Glue available in a job. The Python version indicates the version supported for jobs of type Spark. Ray jobs should set GlueVersion to 4.0 or greater. However, the versions of Ray, Python and additional libraries available in your Ray job are determined by the Runtime parameter of the Job command. For more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide. Jobs that are created without specifying a Glue version default to Glue 0.9.
      */
     GlueVersion?: GlueVersionString;
     /**
@@ -8481,11 +8485,11 @@ declare namespace Glue {
      */
     Command?: JobCommand;
     /**
-     * The default arguments for this job. You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue itself consumes. For information about how to specify and consume your own Job arguments, see the Calling Glue APIs in Python topic in the developer guide. For information about the
+     * The default arguments for every run of this job, specified as name-value pairs. You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue itself consumes. Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue Connection, Secrets Manager or other secret management mechanism if you intend to keep them within the Job. For information about how to specify and consume your own Job arguments, see the Calling Glue APIs in Python topic in the developer guide. For information about the arguments you can provide to this field when configuring Spark jobs, see the Special Parameters Used by Glue topic in the developer guide. For information about the arguments you can provide to this field when configuring Ray jobs, see Using job parameters in Ray jobs in the developer guide.
      */
     DefaultArguments?: GenericMap;
     /**
-     *
+     * Arguments for this job that are not overridden when providing job arguments in a job run, specified as name-value pairs.
      */
     NonOverridableArguments?: GenericMap;
     /**
@@ -8505,11 +8509,11 @@ declare namespace Glue {
      */
     Timeout?: Timeout;
     /**
-     * For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the
+     * For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page. For Glue version 2.0+ jobs, you cannot specify a Maximum capacity. Instead, you should specify a Worker type and the Number of workers. Do not set MaxCapacity if using WorkerType and NumberOfWorkers. The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job, an Apache Spark ETL job, or an Apache Spark streaming ETL job: When you specify a Python shell job (JobCommand.Name="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU. When you specify an Apache Spark ETL job (JobCommand.Name="glueetl") or Apache Spark streaming ETL job (JobCommand.Name="gluestreaming"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
      */
     MaxCapacity?: NullableDouble;
     /**
-     * The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X. For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker. For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
+     * The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker. For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPU, 64 GB of m emory, 128 GB disk), and provides up to 8 Ray workers based on the autoscaler.
      */
     WorkerType?: WorkerType;
     /**
@@ -8525,7 +8529,7 @@ declare namespace Glue {
      */
     NotificationProperty?: NotificationProperty;
     /**
-     *
+     * In Spark jobs, GlueVersion determines the versions of Apache Spark and Python that Glue available in a job. The Python version indicates the version supported for jobs of type Spark. Ray jobs should set GlueVersion to 4.0 or greater. However, the versions of Ray, Python and additional libraries available in your Ray job are determined by the Runtime parameter of the Job command. For more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide. Jobs that are created without specifying a Glue version default to Glue 0.9.
      */
     GlueVersion?: GlueVersionString;
     /**
```
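The expanded Arguments text above spells out the run-time override semantics: per-run `Arguments` replace the job's `DefaultArguments`, while `NonOverridableArguments` cannot be replaced. A minimal sketch; the job name and the script-consumed argument are hypothetical:

```ts
import Glue = require('aws-sdk/clients/glue');

const glue = new Glue({ region: 'us-east-1' });

// Arguments here replace DefaultArguments for this run only.
glue.startJobRun({
  JobName: 'example-spark-etl',                 // placeholder
  Arguments: {
    '--input_path': 's3://example-bucket/in/',  // hypothetical argument your script reads
  },
}).promise().then(r => console.log('run id:', r.JobRunId));
```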
```diff
@@ -10457,6 +10461,7 @@ declare namespace Glue {
      */
     Id?: IntegerValue;
   }
+  export type RuntimeNameString = string;
   export interface S3CatalogDeltaSource {
     /**
      * The name of the Delta Lake data source.
@@ -11689,7 +11694,7 @@ declare namespace Glue {
      */
     JobRunId?: IdString;
     /**
-     * The job arguments
+     * The job arguments associated with this run. For this job run, they replace the default arguments set in the job definition itself. You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue itself consumes. Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue Connection, Secrets Manager or other secret management mechanism if you intend to keep them within the Job. For information about how to specify and consume your own Job arguments, see the Calling Glue APIs in Python topic in the developer guide. For information about the arguments you can provide to this field when configuring Spark jobs, see the Special Parameters Used by Glue topic in the developer guide. For information about the arguments you can provide to this field when configuring Ray jobs, see Using job parameters in Ray jobs in the developer guide.
      */
     Arguments?: GenericMap;
     /**
@@ -11701,7 +11706,7 @@ declare namespace Glue {
      */
     Timeout?: Timeout;
     /**
-     *
+     * For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page. For Glue version 2.0+ jobs, you cannot specify a Maximum capacity. Instead, you should specify a Worker type and the Number of workers. Do not set MaxCapacity if using WorkerType and NumberOfWorkers. The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job, an Apache Spark ETL job, or an Apache Spark streaming ETL job: When you specify a Python shell job (JobCommand.Name="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU. When you specify an Apache Spark ETL job (JobCommand.Name="glueetl") or Apache Spark streaming ETL job (JobCommand.Name="gluestreaming"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
      */
     MaxCapacity?: NullableDouble;
     /**
@@ -11713,7 +11718,7 @@ declare namespace Glue {
      */
     NotificationProperty?: NotificationProperty;
     /**
-     * The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X. For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker. For the G.1X worker type, each worker
+     * The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker. For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs. For the Z.2X worker type, each worker maps to 2 DPU (8vCPU, 64 GB of m emory, 128 GB disk), and provides up to 8 Ray workers (one per vCPU) based on the autoscaler.
      */
     WorkerType?: WorkerType;
     /**
@@ -13290,7 +13295,7 @@ declare namespace Glue {
   export type VersionString = string;
   export type VersionsString = string;
   export type ViewTextString = string;
-  export type WorkerType = "Standard"|"G.1X"|"G.2X"|"G.025X"|"G.4X"|"G.8X"|string;
+  export type WorkerType = "Standard"|"G.1X"|"G.2X"|"G.025X"|"G.4X"|"G.8X"|"Z.2X"|string;
   export interface Workflow {
     /**
      * The name of the workflow.
```
package/clients/groundstation.d.ts CHANGED

```diff
@@ -125,11 +125,11 @@ declare class GroundStation extends Service {
    */
   getDataflowEndpointGroup(callback?: (err: AWSError, data: GroundStation.Types.GetDataflowEndpointGroupResponse) => void): Request<GroundStation.Types.GetDataflowEndpointGroupResponse, AWSError>;
   /**
-   * Returns the number of minutes used by account.
+   * Returns the number of reserved minutes used by account.
    */
   getMinuteUsage(params: GroundStation.Types.GetMinuteUsageRequest, callback?: (err: AWSError, data: GroundStation.Types.GetMinuteUsageResponse) => void): Request<GroundStation.Types.GetMinuteUsageResponse, AWSError>;
   /**
-   * Returns the number of minutes used by account.
+   * Returns the number of reserved minutes used by account.
    */
   getMinuteUsage(callback?: (err: AWSError, data: GroundStation.Types.GetMinuteUsageResponse) => void): Request<GroundStation.Types.GetMinuteUsageResponse, AWSError>;
   /**
```
package/clients/iotfleetwise.d.ts CHANGED

```diff
@@ -76,11 +76,11 @@ declare class IoTFleetWise extends Service {
    */
   createSignalCatalog(callback?: (err: AWSError, data: IoTFleetWise.Types.CreateSignalCatalogResponse) => void): Request<IoTFleetWise.Types.CreateSignalCatalogResponse, AWSError>;
   /**
-   * Creates a vehicle, which is an instance of a vehicle model (model manifest). Vehicles created from the same vehicle model consist of the same signals inherited from the vehicle model. If you have an existing Amazon Web Services IoT
+   * Creates a vehicle, which is an instance of a vehicle model (model manifest). Vehicles created from the same vehicle model consist of the same signals inherited from the vehicle model. If you have an existing Amazon Web Services IoT thing, you can use Amazon Web Services IoT FleetWise to create a vehicle and collect data from your thing. For more information, see Create a vehicle (AWS CLI) in the Amazon Web Services IoT FleetWise Developer Guide.
    */
   createVehicle(params: IoTFleetWise.Types.CreateVehicleRequest, callback?: (err: AWSError, data: IoTFleetWise.Types.CreateVehicleResponse) => void): Request<IoTFleetWise.Types.CreateVehicleResponse, AWSError>;
   /**
-   * Creates a vehicle, which is an instance of a vehicle model (model manifest). Vehicles created from the same vehicle model consist of the same signals inherited from the vehicle model. If you have an existing Amazon Web Services IoT
+   * Creates a vehicle, which is an instance of a vehicle model (model manifest). Vehicles created from the same vehicle model consist of the same signals inherited from the vehicle model. If you have an existing Amazon Web Services IoT thing, you can use Amazon Web Services IoT FleetWise to create a vehicle and collect data from your thing. For more information, see Create a vehicle (AWS CLI) in the Amazon Web Services IoT FleetWise Developer Guide.
    */
   createVehicle(callback?: (err: AWSError, data: IoTFleetWise.Types.CreateVehicleResponse) => void): Request<IoTFleetWise.Types.CreateVehicleResponse, AWSError>;
   /**
@@ -340,11 +340,11 @@ declare class IoTFleetWise extends Service {
    */
   putLoggingOptions(callback?: (err: AWSError, data: IoTFleetWise.Types.PutLoggingOptionsResponse) => void): Request<IoTFleetWise.Types.PutLoggingOptionsResponse, AWSError>;
   /**
-   * Registers your Amazon Web Services account, IAM, and Amazon Timestream resources so Amazon Web Services IoT FleetWise can transfer your vehicle data to the Amazon Web Services Cloud. For more information, including step-by-step procedures, see Setting up Amazon Web Services IoT FleetWise
+   * This API operation contains deprecated parameters. Register your account again without the Timestream resources parameter so that Amazon Web Services IoT FleetWise can remove the Timestream metadata stored. You should then pass the data destination into the CreateCampaign API operation. You must delete any existing campaigns that include an empty data destination before you register your account again. For more information, see the DeleteCampaign API operation. If you want to delete the Timestream inline policy from the service-linked role, such as to mitigate an overly permissive policy, you must first delete any existing campaigns. Then delete the service-linked role and register your account again to enable CloudWatch metrics. For more information, see DeleteServiceLinkedRole in the Identity and Access Management API Reference. <p>Registers your Amazon Web Services account, IAM, and Amazon Timestream resources so Amazon Web Services IoT FleetWise can transfer your vehicle data to the Amazon Web Services Cloud. For more information, including step-by-step procedures, see <a href="https://docs.aws.amazon.com/iot-fleetwise/latest/developerguide/setting-up.html">Setting up Amazon Web Services IoT FleetWise</a>. </p> <note> <p>An Amazon Web Services account is <b>not</b> the same thing as a "user." An <a href="https://docs.aws.amazon.com/IAM/latest/UserGuide/introduction_identity-management.html#intro-identity-users">Amazon Web Services user</a> is an identity that you create using Identity and Access Management (IAM) and takes the form of either an <a href="https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users.html">IAM user</a> or an <a href="https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html">IAM role, both with credentials</a>. A single Amazon Web Services account can, and typically does, contain many users and roles.</p> </note>
    */
   registerAccount(params: IoTFleetWise.Types.RegisterAccountRequest, callback?: (err: AWSError, data: IoTFleetWise.Types.RegisterAccountResponse) => void): Request<IoTFleetWise.Types.RegisterAccountResponse, AWSError>;
   /**
-   * Registers your Amazon Web Services account, IAM, and Amazon Timestream resources so Amazon Web Services IoT FleetWise can transfer your vehicle data to the Amazon Web Services Cloud. For more information, including step-by-step procedures, see Setting up Amazon Web Services IoT FleetWise
+   * This API operation contains deprecated parameters. Register your account again without the Timestream resources parameter so that Amazon Web Services IoT FleetWise can remove the Timestream metadata stored. You should then pass the data destination into the CreateCampaign API operation. You must delete any existing campaigns that include an empty data destination before you register your account again. For more information, see the DeleteCampaign API operation. If you want to delete the Timestream inline policy from the service-linked role, such as to mitigate an overly permissive policy, you must first delete any existing campaigns. Then delete the service-linked role and register your account again to enable CloudWatch metrics. For more information, see DeleteServiceLinkedRole in the Identity and Access Management API Reference. <p>Registers your Amazon Web Services account, IAM, and Amazon Timestream resources so Amazon Web Services IoT FleetWise can transfer your vehicle data to the Amazon Web Services Cloud. For more information, including step-by-step procedures, see <a href="https://docs.aws.amazon.com/iot-fleetwise/latest/developerguide/setting-up.html">Setting up Amazon Web Services IoT FleetWise</a>. </p> <note> <p>An Amazon Web Services account is <b>not</b> the same thing as a "user." An <a href="https://docs.aws.amazon.com/IAM/latest/UserGuide/introduction_identity-management.html#intro-identity-users">Amazon Web Services user</a> is an identity that you create using Identity and Access Management (IAM) and takes the form of either an <a href="https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users.html">IAM user</a> or an <a href="https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html">IAM role, both with credentials</a>. A single Amazon Web Services account can, and typically does, contain many users and roles.</p> </note>
    */
   registerAccount(callback?: (err: AWSError, data: IoTFleetWise.Types.RegisterAccountResponse) => void): Request<IoTFleetWise.Types.RegisterAccountResponse, AWSError>;
   /**
```
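Since `timestreamResources` becomes optional on RegisterAccountRequest (typed later in this diff), the re-registration flow described above reduces to an empty request. A minimal sketch, assuming the deprecation guidance applies to your account and that no other request fields are needed:

```ts
import IoTFleetWise = require('aws-sdk/clients/iotfleetwise');

const fleetwise = new IoTFleetWise({ region: 'us-east-1' });

// Re-register without Timestream resources; choose a per-campaign
// destination via dataDestinationConfigs on CreateCampaign instead.
fleetwise.registerAccount({}, (err, data) => {
  if (err) console.error(err);
  else console.log('status:', data.registerAccountStatus);
});
```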
```diff
@@ -446,6 +446,14 @@ declare namespace IoTFleetWise {
      * A specified value for the actuator.
      */
     assignedValue?: string;
+    /**
+     * The deprecation message for the node or the branch that was moved or deleted.
+     */
+    deprecationMessage?: message;
+    /**
+     * A comment in addition to the description.
+     */
+    comment?: message;
   }
   export type AmazonResourceName = string;
   export interface AssociateVehicleFleetRequest {
@@ -497,6 +505,14 @@ declare namespace IoTFleetWise {
      * The default value of the attribute.
      */
     defaultValue?: string;
+    /**
+     * The deprecation message for the node or the branch that was moved or deleted.
+     */
+    deprecationMessage?: message;
+    /**
+     * A comment in addition to the description.
+     */
+    comment?: message;
   }
   export interface BatchCreateVehicleRequest {
     /**
@@ -539,6 +555,14 @@ declare namespace IoTFleetWise {
      * A brief description of the branch.
      */
     description?: description;
+    /**
+     * The deprecation message for the node or the branch that was moved or deleted.
+     */
+    deprecationMessage?: message;
+    /**
+     * A comment in addition to the description.
+     */
+    comment?: message;
   }
   export type CampaignStatus = "CREATING"|"WAITING_FOR_APPROVAL"|"RUNNING"|"SUSPENDED"|string;
   export interface CampaignSummary {
```
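The new `deprecationMessage` and `comment` fields apply uniformly to actuator, attribute, and branch nodes above (and to sensors, later in this diff). A sketch of annotating a branch during a signal catalog update; the catalog name, node path, and messages are illustrative only:

```ts
import IoTFleetWise = require('aws-sdk/clients/iotfleetwise');

const fleetwise = new IoTFleetWise({ region: 'us-east-1' });

fleetwise.updateSignalCatalog({
  name: 'example-catalog',                             // placeholder
  nodesToUpdate: [{
    branch: {
      fullyQualifiedName: 'Vehicle.Chassis.LegacyBus', // hypothetical node
      description: 'Legacy bus signals',
      deprecationMessage: 'Moved to Vehicle.Chassis.Bus',            // new field
      comment: 'Scheduled for removal in the next catalog version',  // new field
    },
  }],
}, (err, data) => {
  if (err) console.error(err);
  else console.log('updated catalog:', data.arn);
});
```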
```diff
@@ -618,11 +642,11 @@ declare namespace IoTFleetWise {
      */
     isSigned: boolean;
     /**
-     * Indicates the beginning of the CAN
+     * Indicates the beginning of the CAN signal. This should always be the least significant bit (LSB). This value might be different from the value in a DBC file. For little endian signals, startBit is the same value as in the DBC file. For big endian signals in a DBC file, the start bit is the most significant bit (MSB). You will have to calculate the LSB instead and pass it as the startBit.
      */
     startBit: nonNegativeInteger;
     /**
-     *
+     * The offset used to calculate the signal value. Combined with factor, the calculation is value = raw_value * factor + offset.
      */
     offset: double;
     /**
```
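The documented decode formula is worth making concrete: `value = raw_value * factor + offset` (the OBD variant later in this diff uses `scaling` in place of `factor`). A tiny worked example with hypothetical decoder values:

```ts
// Hypothetical CAN signal: factor 0.125, offset -40 (e.g., a temperature).
const factor = 0.125;
const offset = -40;
const rawValue = 1000;  // as extracted from the CAN frame at startBit (LSB)

const physicalValue = rawValue * factor + offset;
console.log(physicalValue);  // 1000 * 0.125 - 40 = 85
```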
```diff
@@ -701,7 +725,7 @@ declare namespace IoTFleetWise {
      */
     startTime?: timestamp;
     /**
-     * (Optional) The time the campaign expires, in seconds since epoch (January 1, 1970 at midnight UTC time). Vehicle data
+     * (Optional) The time the campaign expires, in seconds since epoch (January 1, 1970 at midnight UTC time). Vehicle data isn't collected after the campaign expires. Default: 253402214400 (December 31, 9999, 00:00:00 UTC)
      */
     expiryTime?: timestamp;
     /**
@@ -740,6 +764,10 @@ declare namespace IoTFleetWise {
      * Metadata that can be used to manage the campaign.
      */
     tags?: TagList;
+    /**
+     * The destination where the campaign sends data. You can choose to send data to be stored in Amazon S3 or Amazon Timestream. Amazon S3 optimizes the cost of data storage and provides additional mechanisms to use vehicle data, such as data lakes, centralized data storage, data processing pipelines, and analytics. You can use Amazon Timestream to access and analyze time series data, and Timestream to query vehicle data so that you can identify trends and patterns.
+     */
+    dataDestinationConfigs?: DataDestinationConfigs;
   }
   export interface CreateCampaignResponse {
     /**
```
```diff
@@ -903,7 +931,7 @@ declare namespace IoTFleetWise {
      */
     decoderManifestArn: arn;
     /**
-     * Static information about a vehicle in a key-value pair. For example: "engineType" : "1.3 L R2"
+     * Static information about a vehicle in a key-value pair. For example: "engineType" : "1.3 L R2" A campaign must include the keys (attribute names) in dataExtraDimensions for them to display in Amazon Timestream.
      */
     attributes?: attributesMap;
     /**
@@ -969,7 +997,19 @@ declare namespace IoTFleetWise {
      */
     thingArn?: arn;
   }
+  export interface DataDestinationConfig {
+    /**
+     * The Amazon S3 bucket where the Amazon Web Services IoT FleetWise campaign sends data.
+     */
+    s3Config?: S3Config;
+    /**
+     * The Amazon Timestream table where the campaign sends data.
+     */
+    timestreamConfig?: TimestreamConfig;
+  }
+  export type DataDestinationConfigs = DataDestinationConfig[];
   export type DataExtraDimensionNodePathList = NodePath[];
+  export type DataFormat = "JSON"|"PARQUET"|string;
   export interface DecoderManifestSummary {
     /**
      * The name of the decoder manifest.
@@ -1222,6 +1262,10 @@ declare namespace IoTFleetWise {
      * The last time the campaign was modified.
      */
     lastModificationTime?: timestamp;
+    /**
+     * The destination where the campaign sends data. You can choose to send data to be stored in Amazon S3 or Amazon Timestream. Amazon S3 optimizes the cost of data storage and provides additional mechanisms to use vehicle data, such as data lakes, centralized data storage, data processing pipelines, and analytics. You can use Amazon Timestream to access and analyze time series data, and Timestream to query vehicle data so that you can identify trends and patterns.
+     */
+    dataDestinationConfigs?: DataDestinationConfigs;
   }
   export interface GetDecoderManifestRequest {
     /**
@@ -1349,7 +1393,7 @@ declare namespace IoTFleetWise {
     /**
      * Information about the registered Amazon Timestream resources or errors, if any.
      */
-    timestreamRegistrationResponse
+    timestreamRegistrationResponse?: TimestreamRegistrationResponse;
     /**
      * Information about the registered IAM resources or errors, if any.
      */
@@ -1972,7 +2016,7 @@ declare namespace IoTFleetWise {
      */
     scaling: double;
     /**
-     *
+     * The offset used to calculate the signal value. Combined with scaling, the calculation is value = raw_value * scaling + offset.
      */
     offset: double;
     /**
@@ -1993,6 +2037,7 @@ declare namespace IoTFleetWise {
     bitMaskLength?: ObdBitmaskLength;
   }
   export type ObdStandard = string;
+  export type Prefix = string;
   export type ProtocolName = string;
   export type ProtocolVersion = string;
   export interface PutLoggingOptionsRequest {
```
```diff
@@ -2004,7 +2049,7 @@ declare namespace IoTFleetWise {
   export interface PutLoggingOptionsResponse {
   }
   export interface RegisterAccountRequest {
-    timestreamResources
+    timestreamResources?: TimestreamResources;
     /**
      * The IAM resource that allows Amazon Web Services IoT FleetWise to send data to Amazon Timestream.
      */
@@ -2015,7 +2060,7 @@ declare namespace IoTFleetWise {
      * The status of registering your Amazon Web Services account, IAM role, and Timestream resources.
      */
     registerAccountStatus: RegistrationStatus;
-    timestreamResources
+    timestreamResources?: TimestreamResources;
    /**
      * The registered IAM resource that allows Amazon Web Services IoT FleetWise to send data to Amazon Timestream.
      */
@@ -2030,6 +2075,25 @@ declare namespace IoTFleetWise {
     lastModificationTime: timestamp;
   }
   export type RegistrationStatus = "REGISTRATION_PENDING"|"REGISTRATION_SUCCESS"|"REGISTRATION_FAILURE"|string;
+  export type S3BucketArn = string;
+  export interface S3Config {
+    /**
+     * The Amazon Resource Name (ARN) of the Amazon S3 bucket.
+     */
+    bucketArn: S3BucketArn;
+    /**
+     * Specify the format that files are saved in the Amazon S3 bucket. You can save files in an Apache Parquet or JSON format. Parquet - Store data in a columnar storage file format. Parquet is optimal for fast data retrieval and can reduce costs. This option is selected by default. JSON - Store data in a standard text-based JSON file format.
+     */
+    dataFormat?: DataFormat;
+    /**
+     * By default, stored data is compressed as a .gzip file. Compressed files have a reduced file size, which can optimize the cost of data storage.
+     */
+    storageCompressionFormat?: StorageCompressionFormat;
+    /**
+     * (Optional) Enter an S3 bucket prefix. The prefix is the string of characters after the bucket name and before the object name. You can use the prefix to organize data stored in Amazon S3 buckets. For more information, see Organizing objects using prefixes in the Amazon Simple Storage Service User Guide. By default, Amazon Web Services IoT FleetWise sets the prefix processed-data/year=YY/month=MM/date=DD/hour=HH/ (in UTC) to data it delivers to Amazon S3. You can enter a prefix to append it to this default prefix. For example, if you enter the prefix vehicles, the prefix will be vehicles/processed-data/year=YY/month=MM/date=DD/hour=HH/.
+     */
+    prefix?: Prefix;
+  }
   export interface Sensor {
     /**
      * The fully qualified name of the sensor. For example, the fully qualified name of a sensor might be Vehicle.Body.Engine.Battery.
@@ -2059,6 +2123,14 @@ declare namespace IoTFleetWise {
      * The specified possible maximum value of the sensor.
      */
     max?: double;
+    /**
+     * The deprecation message for the node or the branch that was moved or deleted.
+     */
+    deprecationMessage?: message;
+    /**
+     * A comment in addition to the description.
+     */
+    comment?: message;
   }
   export interface SignalCatalogSummary {
     /**
@@ -2118,6 +2190,7 @@ declare namespace IoTFleetWise {
   }
   export type SignalInformationList = SignalInformation[];
   export type SpoolingMode = "OFF"|"TO_DISK"|string;
+  export type StorageCompressionFormat = "NONE"|"GZIP"|string;
   export type String = string;
   export interface Tag {
     /**
@@ -2151,6 +2224,16 @@ declare namespace IoTFleetWise {
      */
     periodMs: collectionPeriodMs;
   }
+  export interface TimestreamConfig {
+    /**
+     * The Amazon Resource Name (ARN) of the Amazon Timestream table.
+     */
+    timestreamTableArn: TimestreamTableArn;
+    /**
+     * The Amazon Resource Name (ARN) of the task execution role that grants Amazon Web Services IoT FleetWise permission to deliver data to the Amazon Timestream table.
+     */
+    executionRoleArn: IAMRoleArn;
+  }
   export type TimestreamDatabaseName = string;
   export interface TimestreamRegistrationResponse {
     /**
@@ -2188,6 +2271,7 @@ declare namespace IoTFleetWise {
      */
     timestreamTableName: TimestreamTableName;
   }
+  export type TimestreamTableArn = string;
   export type TimestreamTableName = string;
   export type TriggerMode = "ALWAYS"|"RISING_EDGE"|string;
   export interface UntagResourceRequest {
```
```diff
@@ -2217,7 +2301,7 @@ declare namespace IoTFleetWise {
      */
     dataExtraDimensions?: DataExtraDimensionNodePathList;
     /**
-     * Specifies how to update a campaign. The action can be one of the following: APPROVE - To approve delivering a data collection scheme to vehicles. SUSPEND - To suspend collecting signal data.
+     * Specifies how to update a campaign. The action can be one of the following: APPROVE - To approve delivering a data collection scheme to vehicles. SUSPEND - To suspend collecting signal data. The campaign is deleted from vehicles and all vehicles in the suspended campaign will stop sending data. RESUME - To reactivate the SUSPEND campaign. The campaign is redeployed to all vehicles and the vehicles will resume sending data. UPDATE - To update a campaign.
      */
     action: UpdateCampaignAction;
   }
```
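The expanded action text above now covers the full lifecycle; resuming a suspended campaign, for example, is a one-field update. A minimal sketch with a placeholder campaign name:

```ts
import IoTFleetWise = require('aws-sdk/clients/iotfleetwise');

const fleetwise = new IoTFleetWise({ region: 'us-east-1' });

// RESUME redeploys a suspended campaign; its vehicles resume sending data.
fleetwise.updateCampaign({
  name: 'example-campaign',  // placeholder
  action: 'RESUME',
}, (err, data) => {
  if (err) console.error(err);
  else console.log('campaign status:', data.status);
});
```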
```diff
@@ -2513,6 +2597,7 @@ declare namespace IoTFleetWise {
   export type listVehiclesMaxResults = number;
   export type maxResults = number;
   export type maxSampleCount = number;
+  export type message = string;
   export type modelManifestSummaries = ModelManifestSummary[];
   export type nextToken = string;
   export type nonNegativeInteger = number;
```