@aws-sdk/client-glue 3.810.0 → 3.812.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. package/dist-cjs/index.js +50 -26
  2. package/dist-es/models/models_0.js +14 -0
  3. package/dist-es/models/models_2.js +0 -14
  4. package/dist-es/models/models_3.js +14 -0
  5. package/dist-es/protocols/Aws_json1_1.js +8 -2
  6. package/dist-types/commands/BatchGetJobsCommand.d.ts +70 -27
  7. package/dist-types/commands/BatchGetWorkflowsCommand.d.ts +2 -1
  8. package/dist-types/commands/BatchPutDataQualityStatisticAnnotationCommand.d.ts +1 -1
  9. package/dist-types/commands/BatchStopJobRunCommand.d.ts +1 -2
  10. package/dist-types/commands/CreateJobCommand.d.ts +70 -27
  11. package/dist-types/commands/GetColumnStatisticsTaskRunCommand.d.ts +2 -1
  12. package/dist-types/commands/GetColumnStatisticsTaskRunsCommand.d.ts +1 -1
  13. package/dist-types/commands/GetColumnStatisticsTaskSettingsCommand.d.ts +1 -2
  14. package/dist-types/commands/GetJobCommand.d.ts +70 -27
  15. package/dist-types/commands/GetJobsCommand.d.ts +70 -27
  16. package/dist-types/commands/ResumeWorkflowRunCommand.d.ts +1 -1
  17. package/dist-types/commands/RunStatementCommand.d.ts +1 -1
  18. package/dist-types/commands/UpdateJobCommand.d.ts +70 -27
  19. package/dist-types/models/models_0.d.ts +208 -77
  20. package/dist-types/models/models_1.d.ts +78 -167
  21. package/dist-types/models/models_2.d.ts +167 -83
  22. package/dist-types/models/models_3.d.ts +98 -1
  23. package/dist-types/ts3.4/commands/BatchGetWorkflowsCommand.d.ts +2 -4
  24. package/dist-types/ts3.4/commands/BatchPutDataQualityStatisticAnnotationCommand.d.ts +1 -1
  25. package/dist-types/ts3.4/commands/BatchStopJobRunCommand.d.ts +4 -2
  26. package/dist-types/ts3.4/commands/GetColumnStatisticsTaskRunCommand.d.ts +2 -4
  27. package/dist-types/ts3.4/commands/GetColumnStatisticsTaskRunsCommand.d.ts +1 -1
  28. package/dist-types/ts3.4/commands/GetColumnStatisticsTaskSettingsCommand.d.ts +4 -2
  29. package/dist-types/ts3.4/commands/ResumeWorkflowRunCommand.d.ts +1 -1
  30. package/dist-types/ts3.4/commands/RunStatementCommand.d.ts +1 -1
  31. package/dist-types/ts3.4/models/models_0.d.ts +56 -20
  32. package/dist-types/ts3.4/models/models_1.d.ts +23 -38
  33. package/dist-types/ts3.4/models/models_2.d.ts +40 -25
  34. package/dist-types/ts3.4/models/models_3.d.ts +31 -0
  35. package/package.json +5 -5
@@ -205,12 +205,12 @@ declare const UpdateJobCommand_base: {
  * },
  * ],
  * },
- * S3JsonSource: { // S3JsonSource
+ * S3ExcelSource: { // S3ExcelSource
  * Name: "STRING_VALUE", // required
  * Paths: [ // required
  * "STRING_VALUE",
  * ],
- * CompressionType: "gzip" || "bzip2",
+ * CompressionType: "snappy" || "lzo" || "gzip" || "brotli" || "lz4" || "uncompressed" || "none",
  * Exclusions: [
  * "STRING_VALUE",
  * ],
@@ -225,8 +225,8 @@ declare const UpdateJobCommand_base: {
  * EnableSamplePath: true || false,
  * SamplePath: "STRING_VALUE",
  * },
- * JsonPath: "STRING_VALUE",
- * Multiline: true || false,
+ * NumberRows: Number("long"),
+ * SkipFooter: Number("int"),
  * OutputSchemas: [
  * {
  * Columns: [
@@ -238,10 +238,30 @@ declare const UpdateJobCommand_base: {
  * },
  * ],
  * },
+ * S3JsonSource: { // S3JsonSource
+ * Name: "STRING_VALUE", // required
+ * Paths: "<EnclosedInStringProperties>", // required
+ * CompressionType: "gzip" || "bzip2",
+ * Exclusions: "<EnclosedInStringProperties>",
+ * GroupSize: "STRING_VALUE",
+ * GroupFiles: "STRING_VALUE",
+ * Recurse: true || false,
+ * MaxBand: Number("int"),
+ * MaxFilesInBand: Number("int"),
+ * AdditionalOptions: {
+ * BoundedSize: Number("long"),
+ * BoundedFiles: Number("long"),
+ * EnableSamplePath: true || false,
+ * SamplePath: "STRING_VALUE",
+ * },
+ * JsonPath: "STRING_VALUE",
+ * Multiline: true || false,
+ * OutputSchemas: "<GlueSchemas>",
+ * },
  * S3ParquetSource: { // S3ParquetSource
  * Name: "STRING_VALUE", // required
  * Paths: "<EnclosedInStringProperties>", // required
- * CompressionType: "snappy" || "lzo" || "gzip" || "uncompressed" || "none",
+ * CompressionType: "snappy" || "lzo" || "gzip" || "brotli" || "lz4" || "uncompressed" || "none",
  * Exclusions: "<EnclosedInStringProperties>",
  * GroupSize: "STRING_VALUE",
  * GroupFiles: "STRING_VALUE",
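The hunks above swap the `S3JsonSource` entry in the doc example for the new `S3ExcelSource` node (the JSON source itself moves further down, unchanged), and widen the Parquet-style compression list. A minimal sketch, assuming an existing IAM role and ETL script, of how the new source could appear in a job; the job name, ARN, node key, and S3 paths are placeholders, not values from this package:

```ts
import { GlueClient, CreateJobCommand } from "@aws-sdk/client-glue";

// A minimal sketch; every name, ARN, and S3 path below is a placeholder.
async function createExcelJob() {
  const client = new GlueClient({});
  return client.send(
    new CreateJobCommand({
      Name: "excel-ingest-job",
      Role: "arn:aws:iam::123456789012:role/MyGlueRole",
      Command: { Name: "glueetl", ScriptLocation: "s3://my-bucket/scripts/job.py" },
      CodeGenConfigurationNodes: {
        "node-1": {
          S3ExcelSource: {
            Name: "MyExcelSource",
            Paths: ["s3://my-bucket/excel/"],
            CompressionType: "snappy", // Excel sources reuse the ParquetCompressionType values
            NumberRows: 1000,          // new field: rows to process per file
            SkipFooter: 1,             // new field: trailing rows to skip
          },
        },
      },
    })
  );
}
```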
@@ -343,7 +363,8 @@ declare const UpdateJobCommand_base: {
  * "<EnclosedInStringProperties>",
  * ],
  * Path: "STRING_VALUE", // required
- * Compression: "snappy" || "lzo" || "gzip" || "uncompressed" || "none",
+ * Compression: "snappy" || "lzo" || "gzip" || "brotli" || "lz4" || "uncompressed" || "none",
+ * NumberTargetPartitions: "STRING_VALUE",
  * SchemaChangePolicy: { // DirectSchemaChangePolicy
  * EnableUpdateCatalog: true || false,
  * UpdateBehavior: "UPDATE_IN_DATABASE" || "LOG",
@@ -351,6 +372,21 @@ declare const UpdateJobCommand_base: {
  * Database: "STRING_VALUE",
  * },
  * },
+ * S3HyperDirectTarget: { // S3HyperDirectTarget
+ * Name: "STRING_VALUE", // required
+ * Inputs: "<OneInput>", // required
+ * PartitionKeys: [
+ * "<EnclosedInStringProperties>",
+ * ],
+ * Path: "STRING_VALUE", // required
+ * Compression: "uncompressed",
+ * SchemaChangePolicy: {
+ * EnableUpdateCatalog: true || false,
+ * UpdateBehavior: "UPDATE_IN_DATABASE" || "LOG",
+ * Table: "STRING_VALUE",
+ * Database: "STRING_VALUE",
+ * },
+ * },
  * S3DirectTarget: { // S3DirectTarget
  * Name: "STRING_VALUE", // required
  * Inputs: "<OneInput>", // required
@@ -359,13 +395,32 @@ declare const UpdateJobCommand_base: {
  * ],
  * Path: "STRING_VALUE", // required
  * Compression: "STRING_VALUE",
- * Format: "json" || "csv" || "avro" || "orc" || "parquet" || "hudi" || "delta", // required
+ * NumberTargetPartitions: "STRING_VALUE",
+ * Format: "json" || "csv" || "avro" || "orc" || "parquet" || "hudi" || "delta" || "iceberg" || "hyper" || "xml", // required
+ * SchemaChangePolicy: {
+ * EnableUpdateCatalog: true || false,
+ * UpdateBehavior: "UPDATE_IN_DATABASE" || "LOG",
+ * Table: "STRING_VALUE",
+ * Database: "STRING_VALUE",
+ * },
+ * },
+ * S3IcebergDirectTarget: { // S3IcebergDirectTarget
+ * Name: "STRING_VALUE", // required
+ * Inputs: "<OneInput>", // required
+ * PartitionKeys: "<GlueStudioPathList>",
+ * Path: "STRING_VALUE", // required
+ * Format: "json" || "csv" || "avro" || "orc" || "parquet" || "hudi" || "delta" || "iceberg" || "hyper" || "xml", // required
+ * AdditionalOptions: {
+ * "<keys>": "STRING_VALUE",
+ * },
  * SchemaChangePolicy: {
  * EnableUpdateCatalog: true || false,
  * UpdateBehavior: "UPDATE_IN_DATABASE" || "LOG",
  * Table: "STRING_VALUE",
  * Database: "STRING_VALUE",
  * },
+ * Compression: "gzip" || "lzo" || "uncompressed" || "snappy", // required
+ * NumberTargetPartitions: "STRING_VALUE",
  * },
  * ApplyMapping: { // ApplyMapping
  * Name: "STRING_VALUE", // required
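The same doc example gains two new target nodes, `S3HyperDirectTarget` and `S3IcebergDirectTarget`. A hedged sketch of both shapes as `CodeGenConfigurationNode` values; the node names, inputs, and S3 paths are placeholders:

```ts
import type { CodeGenConfigurationNode } from "@aws-sdk/client-glue";

// Sketches of the two new target nodes; inputs and S3 paths are placeholders.
const hyperTarget: CodeGenConfigurationNode = {
  S3HyperDirectTarget: {
    Name: "WriteHyper",
    Inputs: ["node-1"],
    Path: "s3://my-bucket/hyper-out/",
    Compression: "uncompressed", // the only HyperTargetCompressionType value
  },
};

const icebergTarget: CodeGenConfigurationNode = {
  S3IcebergDirectTarget: {
    Name: "WriteIceberg",
    Inputs: ["node-1"],
    Path: "s3://my-bucket/iceberg-out/",
    Format: "iceberg",           // one of the new TargetFormat values
    Compression: "snappy",       // required IcebergTargetCompressionType
    NumberTargetPartitions: "4", // a string in the model, not a number
  },
};
```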
@@ -393,9 +448,7 @@ declare const UpdateJobCommand_base: {
  * SelectFields: { // SelectFields
  * Name: "STRING_VALUE", // required
  * Inputs: "<OneInput>", // required
- * Paths: [ // required
- * "<EnclosedInStringProperties>",
- * ],
+ * Paths: "<GlueStudioPathList>", // required
  * },
  * DropFields: { // DropFields
  * Name: "STRING_VALUE", // required
@@ -788,9 +841,7 @@ declare const UpdateJobCommand_base: {
  * Name: "STRING_VALUE", // required
  * Database: "STRING_VALUE", // required
  * Table: "STRING_VALUE", // required
- * AdditionalHudiOptions: {
- * "<keys>": "STRING_VALUE",
- * },
+ * AdditionalHudiOptions: "<AdditionalOptions>",
  * OutputSchemas: "<GlueSchemas>",
  * },
  * S3HudiSource: { // S3HudiSource
@@ -822,8 +873,9 @@ declare const UpdateJobCommand_base: {
  * Inputs: "<OneInput>", // required
  * Path: "STRING_VALUE", // required
  * Compression: "gzip" || "lzo" || "uncompressed" || "snappy", // required
+ * NumberTargetPartitions: "STRING_VALUE",
  * PartitionKeys: "<GlueStudioPathList>",
- * Format: "json" || "csv" || "avro" || "orc" || "parquet" || "hudi" || "delta", // required
+ * Format: "json" || "csv" || "avro" || "orc" || "parquet" || "hudi" || "delta" || "iceberg" || "hyper" || "xml", // required
  * AdditionalOptions: "<AdditionalOptions>", // required
  * SchemaChangePolicy: {
  * EnableUpdateCatalog: true || false,
@@ -858,12 +910,7 @@ declare const UpdateJobCommand_base: {
  * Name: "STRING_VALUE", // required
  * Paths: "<EnclosedInStringProperties>", // required
  * AdditionalDeltaOptions: "<AdditionalOptions>",
- * AdditionalOptions: {
- * BoundedSize: Number("long"),
- * BoundedFiles: Number("long"),
- * EnableSamplePath: true || false,
- * SamplePath: "STRING_VALUE",
- * },
+ * AdditionalOptions: "<S3DirectSourceAdditionalOptions>",
  * OutputSchemas: "<GlueSchemas>",
  * },
  * S3DeltaCatalogTarget: { // S3DeltaCatalogTarget
@@ -884,14 +931,10 @@ declare const UpdateJobCommand_base: {
  * PartitionKeys: "<GlueStudioPathList>",
  * Path: "STRING_VALUE", // required
  * Compression: "uncompressed" || "snappy", // required
- * Format: "json" || "csv" || "avro" || "orc" || "parquet" || "hudi" || "delta", // required
+ * NumberTargetPartitions: "STRING_VALUE",
+ * Format: "json" || "csv" || "avro" || "orc" || "parquet" || "hudi" || "delta" || "iceberg" || "hyper" || "xml", // required
  * AdditionalOptions: "<AdditionalOptions>",
- * SchemaChangePolicy: {
- * EnableUpdateCatalog: true || false,
- * UpdateBehavior: "UPDATE_IN_DATABASE" || "LOG",
- * Table: "STRING_VALUE",
- * Database: "STRING_VALUE",
- * },
+ * SchemaChangePolicy: "<DirectSchemaChangePolicy>",
  * },
  * AmazonRedshiftSource: { // AmazonRedshiftSource
  * Name: "STRING_VALUE",
@@ -5387,9 +5387,12 @@ export declare const TargetFormat: {
     readonly CSV: "csv";
     readonly DELTA: "delta";
     readonly HUDI: "hudi";
+    readonly HYPER: "hyper";
+    readonly ICEBERG: "iceberg";
     readonly JSON: "json";
     readonly ORC: "orc";
     readonly PARQUET: "parquet";
+    readonly XML: "xml";
 };
 /**
  * @public
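`TargetFormat` follows the SDK's const-object enum pattern, so the three new formats are available both as types and as runtime values. A short sketch:

```ts
import { TargetFormat } from "@aws-sdk/client-glue";

// TargetFormat is a const object whose members are the wire strings.
const formats: TargetFormat[] = [
  TargetFormat.ICEBERG, // "iceberg" (new)
  TargetFormat.HYPER,   // "hyper"   (new)
  TargetFormat.XML,     // "xml"     (new)
  TargetFormat.PARQUET, // "parquet"
];
```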
@@ -5451,6 +5454,11 @@ export interface S3DeltaDirectTarget {
      * @public
      */
     Compression: DeltaTargetCompressionType | undefined;
+    /**
+     * <p>Specifies the number of target partitions for distributing Delta Lake dataset files across Amazon S3.</p>
+     * @public
+     */
+    NumberTargetPartitions?: string | undefined;
     /**
      * <p>Specifies the data output format for the target.</p>
      * @public
@@ -5528,6 +5536,11 @@ export interface S3DirectTarget {
      * @public
      */
     Compression?: string | undefined;
+    /**
+     * <p>Specifies the number of target partitions when writing data directly to Amazon S3.</p>
+     * @public
+     */
+    NumberTargetPartitions?: string | undefined;
     /**
      * <p>Specifies the data output format for the target.</p>
      * @public
@@ -5544,7 +5557,9 @@ export interface S3DirectTarget {
  * @enum
  */
 export declare const ParquetCompressionType: {
+    readonly BROTLI: "brotli";
     readonly GZIP: "gzip";
+    readonly LZ4: "lz4";
     readonly LZO: "lzo";
     readonly NONE: "none";
     readonly SNAPPY: "snappy";
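Likewise for the two codecs added to `ParquetCompressionType`:

```ts
import { ParquetCompressionType } from "@aws-sdk/client-glue";

// The new codecs sit alongside the existing ones in the same const object.
const brotli: ParquetCompressionType = ParquetCompressionType.BROTLI; // "brotli"
const lz4: ParquetCompressionType = ParquetCompressionType.LZ4;       // "lz4"
```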
@@ -5554,6 +5569,77 @@ export declare const ParquetCompressionType: {
  * @public
  */
 export type ParquetCompressionType = (typeof ParquetCompressionType)[keyof typeof ParquetCompressionType];
+/**
+ * <p>Specifies an S3 Excel data source.</p>
+ * @public
+ */
+export interface S3ExcelSource {
+    /**
+     * <p>The name of the S3 Excel data source.</p>
+     * @public
+     */
+    Name: string | undefined;
+    /**
+     * <p>The S3 paths where the Excel files are located.</p>
+     * @public
+     */
+    Paths: string[] | undefined;
+    /**
+     * <p>The compression format used for the Excel files.</p>
+     * @public
+     */
+    CompressionType?: ParquetCompressionType | undefined;
+    /**
+     * <p>Patterns to exclude specific files or paths from processing.</p>
+     * @public
+     */
+    Exclusions?: string[] | undefined;
+    /**
+     * <p>Defines the size of file groups for batch processing.</p>
+     * @public
+     */
+    GroupSize?: string | undefined;
+    /**
+     * <p>Specifies how files should be grouped for processing.</p>
+     * @public
+     */
+    GroupFiles?: string | undefined;
+    /**
+     * <p>Indicates whether to recursively process subdirectories.</p>
+     * @public
+     */
+    Recurse?: boolean | undefined;
+    /**
+     * <p>The maximum number of processing bands to use.</p>
+     * @public
+     */
+    MaxBand?: number | undefined;
+    /**
+     * <p>The maximum number of files to process in each band.</p>
+     * @public
+     */
+    MaxFilesInBand?: number | undefined;
+    /**
+     * <p>Additional configuration options for S3 direct source processing.</p>
+     * @public
+     */
+    AdditionalOptions?: S3DirectSourceAdditionalOptions | undefined;
+    /**
+     * <p>The number of rows to process from each Excel file.</p>
+     * @public
+     */
+    NumberRows?: number | undefined;
+    /**
+     * <p>The number of rows to skip at the end of each Excel file.</p>
+     * @public
+     */
+    SkipFooter?: number | undefined;
+    /**
+     * <p>The AWS Glue schemas to apply to the processed data.</p>
+     * @public
+     */
+    OutputSchemas?: GlueSchema[] | undefined;
+}
 /**
  * <p>Specifies a data target that writes to Amazon S3 in Apache Parquet columnar storage.</p>
  * @public
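Of the new interface's fields, only `Name` and `Paths` are required. A sketch of a typed literal; the bucket and key names are placeholders:

```ts
import type { S3ExcelSource } from "@aws-sdk/client-glue";

// Only Name and Paths are required; bucket and key names are placeholders.
const source: S3ExcelSource = {
  Name: "QuarterlyReports",
  Paths: ["s3://my-bucket/reports/2025/"],
  Recurse: true,
  SkipFooter: 1,
  AdditionalOptions: {
    EnableSamplePath: true,
    SamplePath: "s3://my-bucket/reports/sample.xlsx",
  },
};
```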
@@ -5584,6 +5670,11 @@ export interface S3GlueParquetTarget {
      * @public
      */
     Compression?: ParquetCompressionType | undefined;
+    /**
+     * <p>Specifies the number of target partitions for Parquet files when writing to Amazon S3 using AWS Glue.</p>
+     * @public
+     */
+    NumberTargetPartitions?: string | undefined;
     /**
      * <p>A policy that specifies update behavior for the crawler.</p>
      * @public
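`NumberTargetPartitions` is typed as a string here and on the other S3 direct-style targets touched by this release (`S3DeltaDirectTarget`, `S3DirectTarget`, `S3HudiDirectTarget`, `S3IcebergDirectTarget`). A sketch with placeholder names and paths:

```ts
import type { S3GlueParquetTarget } from "@aws-sdk/client-glue";

// Placeholder names and paths; NumberTargetPartitions is a string in the model.
const parquetTarget: S3GlueParquetTarget = {
  Name: "WriteParquet",
  Inputs: ["node-1"],
  Path: "s3://my-bucket/parquet-out/",
  Compression: "brotli",       // one of the new ParquetCompressionType values
  NumberTargetPartitions: "8", // new field
};
```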
@@ -5670,6 +5761,11 @@ export interface S3HudiDirectTarget {
      * @public
      */
     Compression: HudiTargetCompressionType | undefined;
+    /**
+     * <p>Specifies the number of target partitions for distributing Hudi dataset files across Amazon S3.</p>
+     * @public
+     */
+    NumberTargetPartitions?: string | undefined;
     /**
      * <p>Specifies native partitioning using a sequence of keys.</p>
      * @public
@@ -5722,6 +5818,118 @@ export interface S3HudiSource {
      */
     OutputSchemas?: GlueSchema[] | undefined;
 }
+/**
+ * @public
+ * @enum
+ */
+export declare const HyperTargetCompressionType: {
+    readonly UNCOMPRESSED: "uncompressed";
+};
+/**
+ * @public
+ */
+export type HyperTargetCompressionType = (typeof HyperTargetCompressionType)[keyof typeof HyperTargetCompressionType];
+/**
+ * <p>Specifies a HyperDirect data target that writes to Amazon S3.</p>
+ * @public
+ */
+export interface S3HyperDirectTarget {
+    /**
+     * <p>The unique identifier for the HyperDirect target node.</p>
+     * @public
+     */
+    Name: string | undefined;
+    /**
+     * <p>Specifies the input source for the HyperDirect target.</p>
+     * @public
+     */
+    Inputs: string[] | undefined;
+    /**
+     * <p>Defines the partitioning strategy for the output data.</p>
+     * @public
+     */
+    PartitionKeys?: string[][] | undefined;
+    /**
+     * <p>The S3 location where the output data will be written.</p>
+     * @public
+     */
+    Path: string | undefined;
+    /**
+     * <p>The compression type to apply to the output data.</p>
+     * @public
+     */
+    Compression?: HyperTargetCompressionType | undefined;
+    /**
+     * <p>Defines how schema changes are handled during write operations.</p>
+     * @public
+     */
+    SchemaChangePolicy?: DirectSchemaChangePolicy | undefined;
+}
+/**
+ * @public
+ * @enum
+ */
+export declare const IcebergTargetCompressionType: {
+    readonly GZIP: "gzip";
+    readonly LZO: "lzo";
+    readonly SNAPPY: "snappy";
+    readonly UNCOMPRESSED: "uncompressed";
+};
+/**
+ * @public
+ */
+export type IcebergTargetCompressionType = (typeof IcebergTargetCompressionType)[keyof typeof IcebergTargetCompressionType];
+/**
+ * <p>Specifies a target that writes to an Iceberg data source in Amazon S3.</p>
+ * @public
+ */
+export interface S3IcebergDirectTarget {
+    /**
+     * <p>Specifies the unique identifier for the Iceberg target node in your data pipeline.</p>
+     * @public
+     */
+    Name: string | undefined;
+    /**
+     * <p>Defines the single input source that provides data to this Iceberg target.</p>
+     * @public
+     */
+    Inputs: string[] | undefined;
+    /**
+     * <p>Specifies the columns used to partition the Iceberg table data in S3.</p>
+     * @public
+     */
+    PartitionKeys?: string[][] | undefined;
+    /**
+     * <p>Defines the S3 location where the Iceberg table data will be stored.</p>
+     * @public
+     */
+    Path: string | undefined;
+    /**
+     * <p>Specifies the file format used for storing Iceberg table data (e.g., Parquet, ORC).</p>
+     * @public
+     */
+    Format: TargetFormat | undefined;
+    /**
+     * <p>Provides additional configuration options for customizing the Iceberg table behavior.</p>
+     * @public
+     */
+    AdditionalOptions?: Record<string, string> | undefined;
+    /**
+     * <p>Defines how schema changes are handled when writing data to the Iceberg table.</p>
+     * @public
+     */
+    SchemaChangePolicy?: DirectSchemaChangePolicy | undefined;
+    /**
+     * <p>Specifies the compression codec used for Iceberg table files in S3.</p>
+     * @public
+     */
+    Compression: IcebergTargetCompressionType | undefined;
+    /**
+     * <p>Sets the number of target partitions for distributing Iceberg table files across S3.</p>
+     * @public
+     */
+    NumberTargetPartitions?: string | undefined;
+}
 /**
  * <p>Specifies a JSON data store stored in Amazon S3.</p>
  * @public
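Both new compression enums use the same const-object pattern; `HyperTargetCompressionType` has a single value, while `IcebergTargetCompressionType` mirrors the Hudi codec list. A sketch:

```ts
import {
  HyperTargetCompressionType,
  IcebergTargetCompressionType,
} from "@aws-sdk/client-glue";

// Both enums follow the SDK's const-object pattern.
const hyper: HyperTargetCompressionType = HyperTargetCompressionType.UNCOMPRESSED;   // only value
const iceberg: IcebergTargetCompressionType = IcebergTargetCompressionType.SNAPPY;  // gzip | lzo | snappy | uncompressed
```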
@@ -8010,83 +8218,6 @@ export interface Workflow {
      */
     BlueprintDetails?: BlueprintDetails | undefined;
 }
-/**
- * @public
- */
-export interface BatchGetWorkflowsResponse {
-    /**
-     * <p>A list of workflow resource metadata.</p>
-     * @public
-     */
-    Workflows?: Workflow[] | undefined;
-    /**
-     * <p>A list of names of workflows not found.</p>
-     * @public
-     */
-    MissingWorkflows?: string[] | undefined;
-}
-/**
- * <p>An Inclusion Annotation.</p>
- * @public
- */
-export interface DatapointInclusionAnnotation {
-    /**
-     * <p>The ID of the data quality profile the statistic belongs to.</p>
-     * @public
-     */
-    ProfileId?: string | undefined;
-    /**
-     * <p>The Statistic ID.</p>
-     * @public
-     */
-    StatisticId?: string | undefined;
-    /**
-     * <p>The inclusion annotation value to apply to the statistic.</p>
-     * @public
-     */
-    InclusionAnnotation?: InclusionAnnotationValue | undefined;
-}
-/**
- * @public
- */
-export interface BatchPutDataQualityStatisticAnnotationRequest {
-    /**
-     * <p>A list of <code>DatapointInclusionAnnotation</code>'s.</p>
-     * @public
-     */
-    InclusionAnnotations: DatapointInclusionAnnotation[] | undefined;
-    /**
-     * <p>Client Token.</p>
-     * @public
-     */
-    ClientToken?: string | undefined;
-}
-/**
- * @public
- */
-export interface BatchPutDataQualityStatisticAnnotationResponse {
-    /**
-     * <p>A list of <code>AnnotationError</code>'s.</p>
-     * @public
-     */
-    FailedInclusionAnnotations?: AnnotationError[] | undefined;
-}
-/**
- * @public
- */
-export interface BatchStopJobRunRequest {
-    /**
-     * <p>The name of the job definition for which to stop job runs.</p>
-     * @public
-     */
-    JobName: string | undefined;
-    /**
-     * <p>A list of the <code>JobRunIds</code> that should be stopped for that job
-     *             definition.</p>
-     * @public
-     */
-    JobRunIds: string[] | undefined;
-}
 /**
  * @internal
  */