aws-sdk 2.1418.0 → 2.1420.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +17 -1
- package/README.md +1 -1
- package/apis/codecatalyst-2022-09-28.min.json +294 -24
- package/apis/connectcases-2022-10-03.min.json +35 -25
- package/apis/glue-2017-03-31.min.json +624 -606
- package/apis/rds-2014-10-31.min.json +4 -2
- package/apis/route53resolver-2018-04-01.min.json +201 -62
- package/apis/route53resolver-2018-04-01.paginators.json +6 -0
- package/apis/s3-2006-03-01.examples.json +32 -32
- package/apis/sagemaker-2017-07-24.min.json +205 -158
- package/apis/sagemaker-2017-07-24.paginators.json +6 -0
- package/apis/securitylake-2018-05-10.min.json +139 -34
- package/apis/transcribe-2017-10-26.min.json +30 -6
- package/clients/codecatalyst.d.ts +264 -7
- package/clients/connectcases.d.ts +15 -4
- package/clients/glue.d.ts +29 -6
- package/clients/lexmodelsv2.d.ts +11 -11
- package/clients/mediaconvert.d.ts +3 -3
- package/clients/rds.d.ts +30 -22
- package/clients/route53resolver.d.ts +220 -7
- package/clients/sagemaker.d.ts +88 -10
- package/clients/sagemakerfeaturestoreruntime.d.ts +13 -13
- package/clients/savingsplans.d.ts +1 -1
- package/clients/securitylake.d.ts +93 -11
- package/clients/transcribeservice.d.ts +21 -0
- package/clients/workspaces.d.ts +2 -2
- package/dist/aws-sdk-core-react-native.js +1 -1
- package/dist/aws-sdk-react-native.js +11 -11
- package/dist/aws-sdk.js +7 -5
- package/dist/aws-sdk.min.js +14 -14
- package/lib/core.js +1 -1
- package/package.json +1 -1
package/clients/codecatalyst.d.ts
CHANGED
@@ -35,6 +35,14 @@ declare class CodeCatalyst extends Service {
    * Creates a project in a specified space.
    */
   createProject(callback?: (err: AWSError, data: CodeCatalyst.Types.CreateProjectResponse) => void): Request<CodeCatalyst.Types.CreateProjectResponse, AWSError>;
+  /**
+   * Creates an empty Git-based source repository in a specified project. The repository is created with an initial empty commit with a default branch named main.
+   */
+  createSourceRepository(params: CodeCatalyst.Types.CreateSourceRepositoryRequest, callback?: (err: AWSError, data: CodeCatalyst.Types.CreateSourceRepositoryResponse) => void): Request<CodeCatalyst.Types.CreateSourceRepositoryResponse, AWSError>;
+  /**
+   * Creates an empty Git-based source repository in a specified project. The repository is created with an initial empty commit with a default branch named main.
+   */
+  createSourceRepository(callback?: (err: AWSError, data: CodeCatalyst.Types.CreateSourceRepositoryResponse) => void): Request<CodeCatalyst.Types.CreateSourceRepositoryResponse, AWSError>;
   /**
    * Creates a branch in a specified source repository in Amazon CodeCatalyst. This API only creates a branch in a source repository hosted in Amazon CodeCatalyst. You cannot use this API to create a branch in a linked repository.
    */
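The hunk above adds the new createSourceRepository operation to the CodeCatalyst client. A minimal usage sketch with the v2 SDK is shown below; the space, project, and repository names are placeholders, and CodeCatalyst token/credential configuration is omitted.

```ts
import * as AWS from "aws-sdk";

// Placeholder region; bearer-token setup for CodeCatalyst is omitted here.
const codecatalyst = new AWS.CodeCatalyst({ region: "us-west-2" });

async function createRepo(): Promise<void> {
  const resp = await codecatalyst
    .createSourceRepository({
      spaceName: "my-space",
      projectName: "my-project",
      name: "my-new-repo",
      description: "Created via aws-sdk v2",
    })
    .promise();
  // The response echoes the space, project, and repository names.
  console.log("created", resp.name);
}
```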
@@ -59,6 +67,30 @@ declare class CodeCatalyst extends Service {
    * Deletes a Dev Environment.
    */
   deleteDevEnvironment(callback?: (err: AWSError, data: CodeCatalyst.Types.DeleteDevEnvironmentResponse) => void): Request<CodeCatalyst.Types.DeleteDevEnvironmentResponse, AWSError>;
+  /**
+   * Deletes a project in a space.
+   */
+  deleteProject(params: CodeCatalyst.Types.DeleteProjectRequest, callback?: (err: AWSError, data: CodeCatalyst.Types.DeleteProjectResponse) => void): Request<CodeCatalyst.Types.DeleteProjectResponse, AWSError>;
+  /**
+   * Deletes a project in a space.
+   */
+  deleteProject(callback?: (err: AWSError, data: CodeCatalyst.Types.DeleteProjectResponse) => void): Request<CodeCatalyst.Types.DeleteProjectResponse, AWSError>;
+  /**
+   * Deletes a source repository in Amazon CodeCatalyst. You cannot use this API to delete a linked repository. It can only be used to delete a Amazon CodeCatalyst source repository.
+   */
+  deleteSourceRepository(params: CodeCatalyst.Types.DeleteSourceRepositoryRequest, callback?: (err: AWSError, data: CodeCatalyst.Types.DeleteSourceRepositoryResponse) => void): Request<CodeCatalyst.Types.DeleteSourceRepositoryResponse, AWSError>;
+  /**
+   * Deletes a source repository in Amazon CodeCatalyst. You cannot use this API to delete a linked repository. It can only be used to delete a Amazon CodeCatalyst source repository.
+   */
+  deleteSourceRepository(callback?: (err: AWSError, data: CodeCatalyst.Types.DeleteSourceRepositoryResponse) => void): Request<CodeCatalyst.Types.DeleteSourceRepositoryResponse, AWSError>;
+  /**
+   * Deletes a space. Deleting a space cannot be undone. Additionally, since space names must be unique across Amazon CodeCatalyst, you cannot reuse names of deleted spaces.
+   */
+  deleteSpace(params: CodeCatalyst.Types.DeleteSpaceRequest, callback?: (err: AWSError, data: CodeCatalyst.Types.DeleteSpaceResponse) => void): Request<CodeCatalyst.Types.DeleteSpaceResponse, AWSError>;
+  /**
+   * Deletes a space. Deleting a space cannot be undone. Additionally, since space names must be unique across Amazon CodeCatalyst, you cannot reuse names of deleted spaces.
+   */
+  deleteSpace(callback?: (err: AWSError, data: CodeCatalyst.Types.DeleteSpaceResponse) => void): Request<CodeCatalyst.Types.DeleteSpaceResponse, AWSError>;
   /**
    * Returns information about a Dev Environment for a source repository in a project. Dev Environments are specific to the user who creates them.
    */
@@ -75,6 +107,14 @@ declare class CodeCatalyst extends Service {
    * Returns information about a project.
    */
   getProject(callback?: (err: AWSError, data: CodeCatalyst.Types.GetProjectResponse) => void): Request<CodeCatalyst.Types.GetProjectResponse, AWSError>;
+  /**
+   * Returns information about a source repository.
+   */
+  getSourceRepository(params: CodeCatalyst.Types.GetSourceRepositoryRequest, callback?: (err: AWSError, data: CodeCatalyst.Types.GetSourceRepositoryResponse) => void): Request<CodeCatalyst.Types.GetSourceRepositoryResponse, AWSError>;
+  /**
+   * Returns information about a source repository.
+   */
+  getSourceRepository(callback?: (err: AWSError, data: CodeCatalyst.Types.GetSourceRepositoryResponse) => void): Request<CodeCatalyst.Types.GetSourceRepositoryResponse, AWSError>;
   /**
    * Returns information about the URLs that can be used with a Git client to clone a source repository.
    */
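A short sketch of reading repository metadata back with the new getSourceRepository operation; the names are the same placeholders used above, and the createdTime/lastUpdatedTime fields come from the GetSourceRepositoryResponse shape shown further down in this diff.

```ts
import * as AWS from "aws-sdk";

const codecatalyst = new AWS.CodeCatalyst({ region: "us-west-2" });

async function describeRepo(): Promise<void> {
  const repo = await codecatalyst
    .getSourceRepository({ spaceName: "my-space", projectName: "my-project", name: "my-new-repo" })
    .promise();
  // createdTime and lastUpdatedTime are RFC 3339 (UTC) timestamps.
  console.log(repo.name, repo.createdTime, repo.lastUpdatedTime);
}
```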
@@ -211,6 +251,22 @@ declare class CodeCatalyst extends Service {
    * Changes one or more values for a Dev Environment. Updating certain values of the Dev Environment will cause a restart.
    */
   updateDevEnvironment(callback?: (err: AWSError, data: CodeCatalyst.Types.UpdateDevEnvironmentResponse) => void): Request<CodeCatalyst.Types.UpdateDevEnvironmentResponse, AWSError>;
+  /**
+   * Changes one or more values for a project.
+   */
+  updateProject(params: CodeCatalyst.Types.UpdateProjectRequest, callback?: (err: AWSError, data: CodeCatalyst.Types.UpdateProjectResponse) => void): Request<CodeCatalyst.Types.UpdateProjectResponse, AWSError>;
+  /**
+   * Changes one or more values for a project.
+   */
+  updateProject(callback?: (err: AWSError, data: CodeCatalyst.Types.UpdateProjectResponse) => void): Request<CodeCatalyst.Types.UpdateProjectResponse, AWSError>;
+  /**
+   * Changes one or more values for a space.
+   */
+  updateSpace(params: CodeCatalyst.Types.UpdateSpaceRequest, callback?: (err: AWSError, data: CodeCatalyst.Types.UpdateSpaceResponse) => void): Request<CodeCatalyst.Types.UpdateSpaceResponse, AWSError>;
+  /**
+   * Changes one or more values for a space.
+   */
+  updateSpace(callback?: (err: AWSError, data: CodeCatalyst.Types.UpdateSpaceResponse) => void): Request<CodeCatalyst.Types.UpdateSpaceResponse, AWSError>;
   /**
    * Verifies whether the calling user has a valid Amazon CodeCatalyst login and session. If successful, this returns the ID of the user in Amazon CodeCatalyst.
    */
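Taken together, these additions round out project and space lifecycle management in the client. The sketch below chains the new operations in a plausible teardown order; all names are placeholders, and deleteSpace is irreversible per the documentation above.

```ts
import * as AWS from "aws-sdk";

const codecatalyst = new AWS.CodeCatalyst({ region: "us-west-2" });

async function archiveAndDelete(): Promise<void> {
  // Update metadata first, then remove the repository, the project, and finally the space.
  await codecatalyst.updateProject({ spaceName: "my-space", name: "my-project", description: "archived" }).promise();
  await codecatalyst.deleteSourceRepository({ spaceName: "my-space", projectName: "my-project", name: "my-new-repo" }).promise();
  await codecatalyst.deleteProject({ spaceName: "my-space", name: "my-project" }).promise();
  // Deleting a space cannot be undone, and the name cannot be reused.
  await codecatalyst.deleteSpace({ name: "my-space" }).promise();
}
```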
@@ -288,7 +344,7 @@ declare namespace CodeCatalyst {
      */
     alias?: CreateDevEnvironmentRequestAliasString;
     /**
-     * Information about the integrated development environment (IDE) configured for a Dev Environment. An IDE is required to create a Dev Environment. For Dev Environment creation, this field contains configuration information and must be provided.
+     * Information about the integrated development environment (IDE) configured for a Dev Environment. An IDE is required to create a Dev Environment. For Dev Environment creation, this field contains configuration information and must be provided.
      */
     ides?: IdeConfigurationList;
     /**
@@ -391,6 +447,42 @@ declare namespace CodeCatalyst {
      */
     headCommitId?: String;
   }
+  export interface CreateSourceRepositoryRequest {
+    /**
+     * The name of the space.
+     */
+    spaceName: NameString;
+    /**
+     * The name of the project in the space.
+     */
+    projectName: NameString;
+    /**
+     * The name of the source repository. For more information about name requirements, see Quotas for source repositories.
+     */
+    name: SourceRepositoryNameString;
+    /**
+     * The description of the source repository.
+     */
+    description?: SourceRepositoryDescriptionString;
+  }
+  export interface CreateSourceRepositoryResponse {
+    /**
+     * The name of the space.
+     */
+    spaceName: NameString;
+    /**
+     * The name of the project in the space.
+     */
+    projectName: NameString;
+    /**
+     * The name of the source repository.
+     */
+    name: SourceRepositoryNameString;
+    /**
+     * The description of the source repository.
+     */
+    description?: SourceRepositoryDescriptionString;
+  }
   export interface DeleteAccessTokenRequest {
     /**
      * The ID of the personal access token to delete. You can find the IDs of all PATs associated with your Amazon Web Services Builder ID in a space by calling ListAccessTokens.
@@ -427,6 +519,74 @@ declare namespace CodeCatalyst {
      */
     id: Uuid;
   }
+  export interface DeleteProjectRequest {
+    /**
+     * The name of the space.
+     */
+    spaceName: NameString;
+    /**
+     * The name of the project in the space. To retrieve a list of project names, use ListProjects.
+     */
+    name: NameString;
+  }
+  export interface DeleteProjectResponse {
+    /**
+     * The name of the space.
+     */
+    spaceName: NameString;
+    /**
+     * The name of the project in the space.
+     */
+    name: NameString;
+    /**
+     * The friendly name displayed to users of the project in Amazon CodeCatalyst.
+     */
+    displayName?: String;
+  }
+  export interface DeleteSourceRepositoryRequest {
+    /**
+     * The name of the space.
+     */
+    spaceName: NameString;
+    /**
+     * The name of the project in the space.
+     */
+    projectName: NameString;
+    /**
+     * The name of the source repository.
+     */
+    name: SourceRepositoryNameString;
+  }
+  export interface DeleteSourceRepositoryResponse {
+    /**
+     * The name of the space.
+     */
+    spaceName: NameString;
+    /**
+     * The name of the project in the space.
+     */
+    projectName: NameString;
+    /**
+     * The name of the repository.
+     */
+    name: SourceRepositoryNameString;
+  }
+  export interface DeleteSpaceRequest {
+    /**
+     * The name of the space. To retrieve a list of space names, use ListSpaces.
+     */
+    name: NameString;
+  }
+  export interface DeleteSpaceResponse {
+    /**
+     * The name of the space.
+     */
+    name: NameString;
+    /**
+     * The friendly name of the space displayed to users of the space in Amazon CodeCatalyst.
+     */
+    displayName?: String;
+  }
   export interface DevEnvironmentAccessDetails {
     /**
      * The URL used to send commands to and from the Dev Environment.
@@ -610,7 +770,7 @@ declare namespace CodeCatalyst {
      */
     sourceIpAddress?: String;
     /**
-     *
+     * The user agent whose actions are recorded in the event.
      */
     userAgent?: String;
   }
@@ -639,15 +799,15 @@ declare namespace CodeCatalyst {
   export type ExecuteCommandSessionConfigurationCommandString = string;
   export interface Filter {
     /**
-     *
+     * A key that can be used to sort results.
      */
     key: String;
     /**
-     *
+     * The values of the key.
      */
     values: StringList;
     /**
-     *
+     * The operator used to compare the fields.
      */
     comparisonOperator?: String;
   }
@@ -771,6 +931,46 @@ declare namespace CodeCatalyst {
      */
     https: String;
   }
+  export interface GetSourceRepositoryRequest {
+    /**
+     * The name of the space.
+     */
+    spaceName: NameString;
+    /**
+     * The name of the project in the space.
+     */
+    projectName: NameString;
+    /**
+     * The name of the source repository.
+     */
+    name: SourceRepositoryNameString;
+  }
+  export interface GetSourceRepositoryResponse {
+    /**
+     * The name of the space.
+     */
+    spaceName: NameString;
+    /**
+     * The name of the project in the space.
+     */
+    projectName: NameString;
+    /**
+     * The name of the source repository.
+     */
+    name: SourceRepositoryNameString;
+    /**
+     * The description of the source repository.
+     */
+    description?: SourceRepositoryDescriptionString;
+    /**
+     * The time the source repository was last updated, in coordinated universal time (UTC) timestamp format as specified in RFC 3339.
+     */
+    lastUpdatedTime: Timestamp;
+    /**
+     * The time the source repository was created, in coordinated universal time (UTC) timestamp format as specified in RFC 3339.
+     */
+    createdTime: Timestamp;
+  }
   export interface GetSpaceRequest {
     /**
      * The name of the space.
@@ -1188,7 +1388,7 @@ declare namespace CodeCatalyst {
      */
     key: FilterKey;
     /**
-     * The
+     * The values of the key.
      */
     values: StringList;
     /**
@@ -1230,6 +1430,7 @@ declare namespace CodeCatalyst {
   export type SourceRepositoryDescriptionString = string;
   export type SourceRepositoryIdString = string;
   export type SourceRepositoryNameString = string;
+  export type SpaceDescription = string;
   export type SpaceSummaries = SpaceSummary[];
   export interface SpaceSummary {
     /**
@@ -1473,13 +1674,69 @@ declare namespace CodeCatalyst {
     clientToken?: ClientToken;
   }
   export type UpdateDevEnvironmentResponseAliasString = string;
+  export interface UpdateProjectRequest {
+    /**
+     * The name of the space.
+     */
+    spaceName: NameString;
+    /**
+     * The name of the project.
+     */
+    name: NameString;
+    /**
+     * The description of the project.
+     */
+    description?: ProjectDescription;
+  }
+  export interface UpdateProjectResponse {
+    /**
+     * The name of the space.
+     */
+    spaceName?: NameString;
+    /**
+     * The name of the project.
+     */
+    name?: NameString;
+    /**
+     * The friendly name of the project displayed to users in Amazon CodeCatalyst.
+     */
+    displayName?: String;
+    /**
+     * The description of the project.
+     */
+    description?: String;
+  }
+  export interface UpdateSpaceRequest {
+    /**
+     * The name of the space.
+     */
+    name: NameString;
+    /**
+     * The description of the space.
+     */
+    description?: SpaceDescription;
+  }
+  export interface UpdateSpaceResponse {
+    /**
+     * The name of the space.
+     */
+    name?: NameString;
+    /**
+     * The friendly name of the space displayed to users in Amazon CodeCatalyst.
+     */
+    displayName?: String;
+    /**
+     * The description of the space.
+     */
+    description?: String;
+  }
   export interface UserIdentity {
     /**
      * The role assigned to the user in a Amazon CodeCatalyst space or project when the event occurred.
      */
     userType: UserType;
     /**
-     *
+     * The ID of the Amazon CodeCatalyst service principal.
      */
     principalId: String;
     /**
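The new request and response interfaces above are exported from the CodeCatalyst namespace, so update calls can be typed explicitly. A small sketch with placeholder values, assuming the usual aws-sdk v2 pattern where each client's namespace is also reachable as its Types export:

```ts
import * as AWS from "aws-sdk";

// Placeholder values; only name/spaceName are required, descriptions are optional.
const updateSpaceParams: AWS.CodeCatalyst.Types.UpdateSpaceRequest = {
  name: "my-space",
  description: "Team space for platform work", // optional SpaceDescription
};

const updateProjectParams: AWS.CodeCatalyst.Types.UpdateProjectRequest = {
  spaceName: "my-space",
  name: "my-project",
  description: "Owned by the platform team", // optional ProjectDescription
};
```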
package/clients/connectcases.d.ts
CHANGED
@@ -36,11 +36,11 @@ declare class ConnectCases extends Service {
    */
   createCase(callback?: (err: AWSError, data: ConnectCases.Types.CreateCaseResponse) => void): Request<ConnectCases.Types.CreateCaseResponse, AWSError>;
   /**
-   * Creates a domain, which is a container for all case data, such as cases, fields, templates and layouts. Each Amazon Connect instance can be associated with only one Cases domain. This will not associate your connect instance to Cases domain. Instead, use the Amazon Connect CreateIntegrationAssociation API. You need specific IAM permissions to successfully associate the Cases domain. For more information, see Onboard to Cases.
+   * Creates a domain, which is a container for all case data, such as cases, fields, templates and layouts. Each Amazon Connect instance can be associated with only one Cases domain. This will not associate your connect instance to Cases domain. Instead, use the Amazon Connect CreateIntegrationAssociation API. You need specific IAM permissions to successfully associate the Cases domain. For more information, see Onboard to Cases. </important>
    */
   createDomain(params: ConnectCases.Types.CreateDomainRequest, callback?: (err: AWSError, data: ConnectCases.Types.CreateDomainResponse) => void): Request<ConnectCases.Types.CreateDomainResponse, AWSError>;
   /**
-   * Creates a domain, which is a container for all case data, such as cases, fields, templates and layouts. Each Amazon Connect instance can be associated with only one Cases domain. This will not associate your connect instance to Cases domain. Instead, use the Amazon Connect CreateIntegrationAssociation API. You need specific IAM permissions to successfully associate the Cases domain. For more information, see Onboard to Cases.
+   * Creates a domain, which is a container for all case data, such as cases, fields, templates and layouts. Each Amazon Connect instance can be associated with only one Cases domain. This will not associate your connect instance to Cases domain. Instead, use the Amazon Connect CreateIntegrationAssociation API. You need specific IAM permissions to successfully associate the Cases domain. For more information, see Onboard to Cases. </important>
    */
   createDomain(callback?: (err: AWSError, data: ConnectCases.Types.CreateDomainResponse) => void): Request<ConnectCases.Types.CreateDomainResponse, AWSError>;
   /**
@@ -76,11 +76,11 @@ declare class ConnectCases extends Service {
    */
   createTemplate(callback?: (err: AWSError, data: ConnectCases.Types.CreateTemplateResponse) => void): Request<ConnectCases.Types.CreateTemplateResponse, AWSError>;
   /**
-   * Deletes a domain.
+   * Deletes a Cases domain. <note> <p>After deleting your domain you must disassociate the deleted domain from your Amazon Connect instance with another API call before being able to use Cases again with this Amazon Connect instance. See <a href="https://docs.aws.amazon.com/connect/latest/APIReference/API_DeleteIntegrationAssociation.html">DeleteIntegrationAssociation</a>.</p> </note>
    */
   deleteDomain(params: ConnectCases.Types.DeleteDomainRequest, callback?: (err: AWSError, data: ConnectCases.Types.DeleteDomainResponse) => void): Request<ConnectCases.Types.DeleteDomainResponse, AWSError>;
   /**
-   * Deletes a domain.
+   * Deletes a Cases domain. <note> <p>After deleting your domain you must disassociate the deleted domain from your Amazon Connect instance with another API call before being able to use Cases again with this Amazon Connect instance. See <a href="https://docs.aws.amazon.com/connect/latest/APIReference/API_DeleteIntegrationAssociation.html">DeleteIntegrationAssociation</a>.</p> </note>
    */
   deleteDomain(callback?: (err: AWSError, data: ConnectCases.Types.DeleteDomainResponse) => void): Request<ConnectCases.Types.DeleteDomainResponse, AWSError>;
   /**
@@ -329,8 +329,13 @@ declare namespace ConnectCases {
      */
     field?: FieldFilter;
     not?: CaseFilter;
+    /**
+     * Provides "or all" filtering.
+     */
+    orAll?: CaseFilterOrAllList;
   }
   export type CaseFilterAndAllList = CaseFilter[];
+  export type CaseFilterOrAllList = CaseFilter[];
   export type CaseId = string;
   export interface CaseSummary {
     /**
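CaseFilter now supports "or all" alongside the existing andAll, field, and not members. A sketch of a searchCases call that matches either of two field values; the domain ID, field ID, and values are placeholders, and the equalTo field-filter shape is assumed from the existing ConnectCases API rather than shown in this diff.

```ts
import * as AWS from "aws-sdk";

const cases = new AWS.ConnectCases({ region: "us-west-2" });

async function findOpenOrEscalated(): Promise<void> {
  const resp = await cases
    .searchCases({
      domainId: "11111111-2222-3333-4444-555555555555",
      filter: {
        // Matches cases whose status is either "open" or "escalated".
        orAll: [
          { field: { equalTo: { id: "status", value: { stringValue: "open" } } } },
          { field: { equalTo: { id: "status", value: { stringValue: "escalated" } } } },
        ],
      },
    })
    .promise();
  console.log(resp.cases.length, "matching cases");
}
```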
@@ -585,6 +590,8 @@ declare namespace ConnectCases {
   }
   export type DomainSummaryList = DomainSummary[];
   export type Double = number;
+  export interface EmptyFieldValue {
+  }
   export interface EventBridgeConfiguration {
     /**
      * Indicates whether the to broadcast case event data to the customer.
@@ -747,6 +754,10 @@ declare namespace ConnectCases {
      * Can be either null, or have a Double number value type. Only one value can be provided.
      */
     doubleValue?: Double;
+    /**
+     * An empty value.
+     */
+    emptyValue?: EmptyFieldValue;
     /**
      * String value type.
      */
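The emptyValue member gives the field-value union an explicit "no value" case. A sketch of passing it through updateCase, assuming the existing domainId/caseId/fields request shape; the IDs are placeholders.

```ts
import * as AWS from "aws-sdk";

const cases = new AWS.ConnectCases({ region: "us-west-2" });

async function clearNotesField(): Promise<void> {
  await cases
    .updateCase({
      domainId: "11111111-2222-3333-4444-555555555555",
      caseId: "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee",
      fields: [
        // EmptyFieldValue has no members; here it represents a field with an empty value.
        { id: "customer_notes", value: { emptyValue: {} } },
      ],
    })
    .promise();
}
```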
package/clients/glue.d.ts
CHANGED
@@ -3535,6 +3535,10 @@ declare namespace Glue {
      * Specifies Apache Iceberg data store targets.
      */
     IcebergTargets?: IcebergTargetList;
+    /**
+     * Specifies Apache Hudi data store targets.
+     */
+    HudiTargets?: HudiTargetList;
   }
   export interface CrawlsFilter {
     /**
@@ -4017,7 +4021,7 @@ declare namespace Glue {
      */
     NumberOfWorkers?: NullableInteger;
     /**
-     * The type of predefined worker that is allocated when a job runs. Accepts a value of
+     * The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
      */
     WorkerType?: WorkerType;
     /**
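The expanded WorkerType documentation enumerates the Spark worker classes (G.1X through G.8X plus G.025X) and the Ray class (Z.2X). A sketch of selecting one of them when creating a Spark job; the job name, role ARN, and script location are placeholders.

```ts
import * as AWS from "aws-sdk";

const glue = new AWS.Glue({ region: "us-east-1" });

async function createSparkJob(): Promise<void> {
  await glue
    .createJob({
      Name: "nightly-transform",
      Role: "arn:aws:iam::123456789012:role/GlueJobRole",
      Command: { Name: "glueetl", ScriptLocation: "s3://my-bucket/scripts/transform.py", PythonVersion: "3" },
      GlueVersion: "4.0",
      WorkerType: "G.4X", // G.1X | G.2X | G.4X | G.8X | G.025X for Spark, Z.2X for Ray
      NumberOfWorkers: 10,
    })
    .promise();
}
```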
@@ -4355,7 +4359,7 @@ declare namespace Glue {
      */
     NumberOfWorkers?: NullableInteger;
     /**
-     * The type of predefined worker that is allocated
+     * The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, or G.8X for Spark jobs. Accepts the value Z.2X for Ray notebooks. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
      */
     WorkerType?: WorkerType;
     /**
@@ -8064,7 +8068,26 @@ declare namespace Glue {
   }
   export type GrokPattern = string;
   export type HashString = string;
+  export interface HudiTarget {
+    /**
+     * An array of Amazon S3 location strings for Hudi, each indicating the root folder with which the metadata files for a Hudi table resides. The Hudi folder may be located in a child folder of the root folder. The crawler will scan all folders underneath a path for a Hudi folder.
+     */
+    Paths?: PathList;
+    /**
+     * The name of the connection to use to connect to the Hudi target. If your Hudi files are stored in buckets that require VPC authorization, you can set their connection properties here.
+     */
+    ConnectionName?: ConnectionName;
+    /**
+     * A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler.
+     */
+    Exclusions?: PathList;
+    /**
+     * The maximum depth of Amazon S3 paths that the crawler can traverse to discover the Hudi metadata folder in your Amazon S3 path. Used to limit the crawler run time.
+     */
+    MaximumTraversalDepth?: NullableInteger;
+  }
   export type HudiTargetCompressionType = "gzip"|"lzo"|"uncompressed"|"snappy"|string;
+  export type HudiTargetList = HudiTarget[];
   export interface IcebergInput {
     /**
      * A required metadata operation. Can only be set to CREATE.
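With HudiTarget defined, a crawler can be pointed at Hudi table roots through the HudiTargets field added to the crawler targets earlier in this diff. A sketch of a createCrawler call, assuming HudiTargets lives on the Targets parameter alongside IcebergTargets; the bucket, role, database, and crawler names are placeholders.

```ts
import * as AWS from "aws-sdk";

const glue = new AWS.Glue({ region: "us-east-1" });

async function crawlHudiTables(): Promise<void> {
  await glue
    .createCrawler({
      Name: "hudi-crawler",
      Role: "arn:aws:iam::123456789012:role/GlueCrawlerRole",
      DatabaseName: "lakehouse",
      Targets: {
        HudiTargets: [
          {
            Paths: ["s3://my-bucket/hudi/"], // root folders containing Hudi metadata folders
            Exclusions: ["**/_temporary/**"], // glob patterns to skip during the crawl
            MaximumTraversalDepth: 5, // bound how deep the crawler searches for Hudi metadata
          },
        ],
      },
    })
    .promise();
}
```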
@@ -8305,7 +8328,7 @@ declare namespace Glue {
      */
     MaxCapacity?: NullableDouble;
     /**
-     * The type of predefined worker that is allocated when a job runs. Accepts a value of
+     * The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
      */
     WorkerType?: WorkerType;
     /**
@@ -8471,7 +8494,7 @@ declare namespace Glue {
      */
     MaxCapacity?: NullableDouble;
     /**
-     * The type of predefined worker that is allocated when a job runs. Accepts a value of
+     * The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
      */
     WorkerType?: WorkerType;
     /**
@@ -8555,7 +8578,7 @@ declare namespace Glue {
      */
     MaxCapacity?: NullableDouble;
     /**
-     * The type of predefined worker that is allocated when a job runs. Accepts a value of
+     * The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
      */
     WorkerType?: WorkerType;
     /**
@@ -11775,7 +11798,7 @@ declare namespace Glue {
      */
     NotificationProperty?: NotificationProperty;
     /**
-     * The type of predefined worker that is allocated when a job runs. Accepts a value of
+     * The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
      */
     WorkerType?: WorkerType;
     /**