aws-sdk-glue 1.129.0 → 1.131.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: e8028355900f774bd585cfcc878f0e683094d04973031cf85fd12ae9d6495fed
- data.tar.gz: fe572fdbe07ddf4ccc008ad02a6ee919747c6ec1a7d3523ab67473468ef146a4
+ metadata.gz: dc04dd874075a1f2ddf7af7490a830862fd73ed98012976faef7f8bcc88e4bcd
+ data.tar.gz: e67c22567ef9eb6a35dbe08c7ccf3b5f01765d7aea36931100b53cecc5a9ef28
  SHA512:
- metadata.gz: 9f16f6ca0a16d15472a85872643f703d1b2e28083b5f8d9d15c8f5ec2195bef8ebaf50d3a591d786e9bb31baa1b2084a340ec6e688b4d008c154b1868dc313e8
- data.tar.gz: bdff3c1e0efc2fed1b3d63cc767b98f398ba77670caefca1d257c493e0a71e46506ad814dd0da4dd468c813e8ed8b3573f9a8434291c13be7055e200c7f9f7d1
+ metadata.gz: abfead73f43be41c1adb9f9d6425d3f2eb07661cef26367e0f3a83dacbf0adb50f459384cdbcdd7ca5cc75ae9572b4b88dc46ab10c3e19f0f7f2a44644f7e117
+ data.tar.gz: c217d590e1a39d4b26d35283c6d39471c6023a2a6cde6534a96475459884c67990880746201f3c4c9f4bc5aa111602cf12e4e474320e9ebcad409ffe7808900b
data/CHANGELOG.md CHANGED
@@ -1,6 +1,16 @@
  Unreleased Changes
  ------------------
 
+ 1.131.0 (2023-02-15)
+ ------------------
+
+ * Feature - Fix DirectJDBCSource not showing up in CLI code gen
+
+ 1.130.0 (2023-02-08)
+ ------------------
+
+ * Feature - DirectJDBCSource + Glue 4.0 streaming options
+
  1.129.0 (2023-01-19)
  ------------------
 
data/VERSION CHANGED
@@ -1 +1 @@
- 1.129.0
+ 1.131.0
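
The two releases shown above add the DirectJDBCSource node and the Glue 4.0 streaming options detailed in the hunks below. To pick them up in a project, a Gemfile constraint along these lines is enough (the constraint itself is illustrative):

# Gemfile
gem 'aws-sdk-glue', '~> 1.131'
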
data/lib/aws-sdk-glue/client.rb CHANGED
@@ -1353,6 +1353,8 @@ module Aws::Glue
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.stream_arn #=> String
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.role_arn #=> String
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.role_session_name #=> String
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.add_record_timestamp #=> String
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.emit_consumer_lag_metrics #=> String
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.data_preview_options.polling_time #=> Integer
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.data_preview_options.record_polling_limit #=> Integer
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.name #=> String
@@ -1371,6 +1373,9 @@ module Aws::Glue
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.retry_interval_ms #=> Integer
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.max_offsets_per_trigger #=> Integer
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.min_partitions #=> Integer
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.include_headers #=> Boolean
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.add_record_timestamp #=> String
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.emit_consumer_lag_metrics #=> String
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.window_size #=> Integer
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.detect_schema #=> Boolean
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.data_preview_options.polling_time #=> Integer
@@ -1398,6 +1403,8 @@ module Aws::Glue
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.stream_arn #=> String
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.role_arn #=> String
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.role_session_name #=> String
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.add_record_timestamp #=> String
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.emit_consumer_lag_metrics #=> String
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.data_preview_options.polling_time #=> Integer
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.data_preview_options.record_polling_limit #=> Integer
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.name #=> String
@@ -1420,6 +1427,9 @@ module Aws::Glue
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.retry_interval_ms #=> Integer
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.max_offsets_per_trigger #=> Integer
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.min_partitions #=> Integer
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.include_headers #=> Boolean
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.add_record_timestamp #=> String
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.emit_consumer_lag_metrics #=> String
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.data_preview_options.polling_time #=> Integer
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.data_preview_options.record_polling_limit #=> Integer
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].drop_null_fields.name #=> String
@@ -1601,6 +1611,12 @@ module Aws::Glue
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_hudi_direct_target.schema_change_policy.update_behavior #=> String, one of "UPDATE_IN_DATABASE", "LOG"
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_hudi_direct_target.schema_change_policy.table #=> String
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_hudi_direct_target.schema_change_policy.database #=> String
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_jdbc_source.name #=> String
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_jdbc_source.database #=> String
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_jdbc_source.table #=> String
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_jdbc_source.connection_name #=> String
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_jdbc_source.connection_type #=> String, one of "sqlserver", "mysql", "oracle", "postgresql", "redshift"
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_jdbc_source.redshift_tmp_dir #=> String
  # resp.jobs[0].execution_class #=> String, one of "FLEX", "STANDARD"
  # resp.jobs[0].source_control_details.provider #=> String, one of "GITHUB", "AWS_CODE_COMMIT"
  # resp.jobs[0].source_control_details.repository #=> String
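
The accessors added above expose the new DirectJDBCSource node on returned jobs. A minimal, hypothetical sketch of reading them, assuming the standard Aws::Glue::Client and the get_jobs call (the region and output format are placeholders):

require 'aws-sdk-glue'

glue = Aws::Glue::Client.new(region: 'us-east-1') # placeholder region
resp = glue.get_jobs

resp.jobs.each do |job|
  (job.code_gen_configuration_nodes || {}).each do |node_id, node|
    src = node.direct_jdbc_source
    next unless src # only present on jobs that use the new node type
    puts "#{node_id}: #{src.connection_type} #{src.database}.#{src.table} via #{src.connection_name}"
  end
end
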
@@ -3628,6 +3644,8 @@ module Aws::Glue
  # stream_arn: "EnclosedInStringProperty",
  # role_arn: "EnclosedInStringProperty",
  # role_session_name: "EnclosedInStringProperty",
+ # add_record_timestamp: "EnclosedInStringProperty",
+ # emit_consumer_lag_metrics: "EnclosedInStringProperty",
  # },
  # data_preview_options: {
  # polling_time: 1,
@@ -3652,6 +3670,9 @@ module Aws::Glue
  # retry_interval_ms: 1,
  # max_offsets_per_trigger: 1,
  # min_partitions: 1,
+ # include_headers: false,
+ # add_record_timestamp: "EnclosedInStringProperty",
+ # emit_consumer_lag_metrics: "EnclosedInStringProperty",
  # },
  # window_size: 1,
  # detect_schema: false,
@@ -3685,6 +3706,8 @@ module Aws::Glue
  # stream_arn: "EnclosedInStringProperty",
  # role_arn: "EnclosedInStringProperty",
  # role_session_name: "EnclosedInStringProperty",
+ # add_record_timestamp: "EnclosedInStringProperty",
+ # emit_consumer_lag_metrics: "EnclosedInStringProperty",
  # },
  # data_preview_options: {
  # polling_time: 1,
@@ -3713,6 +3736,9 @@ module Aws::Glue
  # retry_interval_ms: 1,
  # max_offsets_per_trigger: 1,
  # min_partitions: 1,
+ # include_headers: false,
+ # add_record_timestamp: "EnclosedInStringProperty",
+ # emit_consumer_lag_metrics: "EnclosedInStringProperty",
  # },
  # data_preview_options: {
  # polling_time: 1,
@@ -3975,6 +4001,14 @@ module Aws::Glue
  # database: "EnclosedInStringProperty",
  # },
  # },
+ # direct_jdbc_source: {
+ # name: "NodeName", # required
+ # database: "EnclosedInStringProperty", # required
+ # table: "EnclosedInStringProperty", # required
+ # connection_name: "EnclosedInStringProperty", # required
+ # connection_type: "sqlserver", # required, accepts sqlserver, mysql, oracle, postgresql, redshift
+ # redshift_tmp_dir: "EnclosedInStringProperty",
+ # },
  # },
  # },
  # execution_class: "FLEX", # accepts FLEX, STANDARD
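
The request shape above can be exercised like this: a minimal, hypothetical create_job sketch that registers a DirectJDBCSource node (the job name, role ARN, script location, connection, database and table are all placeholders, and other job settings are pared down to the minimum):

require 'aws-sdk-glue'

glue = Aws::Glue::Client.new(region: 'us-east-1') # placeholder region

glue.create_job(
  name: 'example-jdbc-ingest',                            # placeholder job name
  role: 'arn:aws:iam::123456789012:role/ExampleGlueRole', # placeholder role ARN
  glue_version: '4.0',
  command: { name: 'glueetl', script_location: 's3://example-bucket/scripts/job.py' },
  code_gen_configuration_nodes: {
    'node-1' => {
      direct_jdbc_source: {
        name: 'jdbc-source',                        # required
        database: 'sales',                          # required
        table: 'orders',                            # required
        connection_name: 'example-jdbc-connection', # required, an existing Glue connection
        connection_type: 'postgresql'               # required, accepts sqlserver, mysql, oracle, postgresql, redshift
        # redshift_tmp_dir applies only when connection_type is 'redshift'
      }
    }
  }
)
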
@@ -7721,6 +7755,8 @@ module Aws::Glue
  # resp.job.code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.stream_arn #=> String
  # resp.job.code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.role_arn #=> String
  # resp.job.code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.role_session_name #=> String
+ # resp.job.code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.add_record_timestamp #=> String
+ # resp.job.code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.emit_consumer_lag_metrics #=> String
  # resp.job.code_gen_configuration_nodes["NodeId"].direct_kinesis_source.data_preview_options.polling_time #=> Integer
  # resp.job.code_gen_configuration_nodes["NodeId"].direct_kinesis_source.data_preview_options.record_polling_limit #=> Integer
  # resp.job.code_gen_configuration_nodes["NodeId"].direct_kafka_source.name #=> String
@@ -7739,6 +7775,9 @@ module Aws::Glue
  # resp.job.code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.retry_interval_ms #=> Integer
  # resp.job.code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.max_offsets_per_trigger #=> Integer
  # resp.job.code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.min_partitions #=> Integer
+ # resp.job.code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.include_headers #=> Boolean
+ # resp.job.code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.add_record_timestamp #=> String
+ # resp.job.code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.emit_consumer_lag_metrics #=> String
  # resp.job.code_gen_configuration_nodes["NodeId"].direct_kafka_source.window_size #=> Integer
  # resp.job.code_gen_configuration_nodes["NodeId"].direct_kafka_source.detect_schema #=> Boolean
  # resp.job.code_gen_configuration_nodes["NodeId"].direct_kafka_source.data_preview_options.polling_time #=> Integer
@@ -7766,6 +7805,8 @@ module Aws::Glue
  # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.stream_arn #=> String
  # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.role_arn #=> String
  # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.role_session_name #=> String
+ # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.add_record_timestamp #=> String
+ # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.emit_consumer_lag_metrics #=> String
  # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.data_preview_options.polling_time #=> Integer
  # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.data_preview_options.record_polling_limit #=> Integer
  # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kafka_source.name #=> String
@@ -7788,6 +7829,9 @@ module Aws::Glue
  # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.retry_interval_ms #=> Integer
  # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.max_offsets_per_trigger #=> Integer
  # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.min_partitions #=> Integer
+ # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.include_headers #=> Boolean
+ # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.add_record_timestamp #=> String
+ # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.emit_consumer_lag_metrics #=> String
  # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kafka_source.data_preview_options.polling_time #=> Integer
  # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kafka_source.data_preview_options.record_polling_limit #=> Integer
  # resp.job.code_gen_configuration_nodes["NodeId"].drop_null_fields.name #=> String
@@ -7969,6 +8013,12 @@ module Aws::Glue
  # resp.job.code_gen_configuration_nodes["NodeId"].s3_hudi_direct_target.schema_change_policy.update_behavior #=> String, one of "UPDATE_IN_DATABASE", "LOG"
  # resp.job.code_gen_configuration_nodes["NodeId"].s3_hudi_direct_target.schema_change_policy.table #=> String
  # resp.job.code_gen_configuration_nodes["NodeId"].s3_hudi_direct_target.schema_change_policy.database #=> String
+ # resp.job.code_gen_configuration_nodes["NodeId"].direct_jdbc_source.name #=> String
+ # resp.job.code_gen_configuration_nodes["NodeId"].direct_jdbc_source.database #=> String
+ # resp.job.code_gen_configuration_nodes["NodeId"].direct_jdbc_source.table #=> String
+ # resp.job.code_gen_configuration_nodes["NodeId"].direct_jdbc_source.connection_name #=> String
+ # resp.job.code_gen_configuration_nodes["NodeId"].direct_jdbc_source.connection_type #=> String, one of "sqlserver", "mysql", "oracle", "postgresql", "redshift"
+ # resp.job.code_gen_configuration_nodes["NodeId"].direct_jdbc_source.redshift_tmp_dir #=> String
  # resp.job.execution_class #=> String, one of "FLEX", "STANDARD"
  # resp.job.source_control_details.provider #=> String, one of "GITHUB", "AWS_CODE_COMMIT"
  # resp.job.source_control_details.repository #=> String
@@ -8535,6 +8585,8 @@ module Aws::Glue
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.stream_arn #=> String
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.role_arn #=> String
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.role_session_name #=> String
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.add_record_timestamp #=> String
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.emit_consumer_lag_metrics #=> String
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.data_preview_options.polling_time #=> Integer
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.data_preview_options.record_polling_limit #=> Integer
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.name #=> String
@@ -8553,6 +8605,9 @@ module Aws::Glue
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.retry_interval_ms #=> Integer
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.max_offsets_per_trigger #=> Integer
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.min_partitions #=> Integer
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.include_headers #=> Boolean
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.add_record_timestamp #=> String
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.emit_consumer_lag_metrics #=> String
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.window_size #=> Integer
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.detect_schema #=> Boolean
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.data_preview_options.polling_time #=> Integer
@@ -8580,6 +8635,8 @@ module Aws::Glue
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.stream_arn #=> String
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.role_arn #=> String
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.role_session_name #=> String
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.add_record_timestamp #=> String
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.emit_consumer_lag_metrics #=> String
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.data_preview_options.polling_time #=> Integer
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.data_preview_options.record_polling_limit #=> Integer
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.name #=> String
@@ -8602,6 +8659,9 @@ module Aws::Glue
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.retry_interval_ms #=> Integer
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.max_offsets_per_trigger #=> Integer
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.min_partitions #=> Integer
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.include_headers #=> Boolean
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.add_record_timestamp #=> String
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.emit_consumer_lag_metrics #=> String
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.data_preview_options.polling_time #=> Integer
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.data_preview_options.record_polling_limit #=> Integer
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].drop_null_fields.name #=> String
@@ -8783,6 +8843,12 @@ module Aws::Glue
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_hudi_direct_target.schema_change_policy.update_behavior #=> String, one of "UPDATE_IN_DATABASE", "LOG"
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_hudi_direct_target.schema_change_policy.table #=> String
  # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_hudi_direct_target.schema_change_policy.database #=> String
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_jdbc_source.name #=> String
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_jdbc_source.database #=> String
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_jdbc_source.table #=> String
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_jdbc_source.connection_name #=> String
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_jdbc_source.connection_type #=> String, one of "sqlserver", "mysql", "oracle", "postgresql", "redshift"
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_jdbc_source.redshift_tmp_dir #=> String
  # resp.jobs[0].execution_class #=> String, one of "FLEX", "STANDARD"
  # resp.jobs[0].source_control_details.provider #=> String, one of "GITHUB", "AWS_CODE_COMMIT"
  # resp.jobs[0].source_control_details.repository #=> String
@@ -15585,6 +15651,8 @@ module Aws::Glue
  # stream_arn: "EnclosedInStringProperty",
  # role_arn: "EnclosedInStringProperty",
  # role_session_name: "EnclosedInStringProperty",
+ # add_record_timestamp: "EnclosedInStringProperty",
+ # emit_consumer_lag_metrics: "EnclosedInStringProperty",
  # },
  # data_preview_options: {
  # polling_time: 1,
@@ -15609,6 +15677,9 @@ module Aws::Glue
  # retry_interval_ms: 1,
  # max_offsets_per_trigger: 1,
  # min_partitions: 1,
+ # include_headers: false,
+ # add_record_timestamp: "EnclosedInStringProperty",
+ # emit_consumer_lag_metrics: "EnclosedInStringProperty",
  # },
  # window_size: 1,
  # detect_schema: false,
@@ -15642,6 +15713,8 @@ module Aws::Glue
  # stream_arn: "EnclosedInStringProperty",
  # role_arn: "EnclosedInStringProperty",
  # role_session_name: "EnclosedInStringProperty",
+ # add_record_timestamp: "EnclosedInStringProperty",
+ # emit_consumer_lag_metrics: "EnclosedInStringProperty",
  # },
  # data_preview_options: {
  # polling_time: 1,
@@ -15670,6 +15743,9 @@ module Aws::Glue
  # retry_interval_ms: 1,
  # max_offsets_per_trigger: 1,
  # min_partitions: 1,
+ # include_headers: false,
+ # add_record_timestamp: "EnclosedInStringProperty",
+ # emit_consumer_lag_metrics: "EnclosedInStringProperty",
  # },
  # data_preview_options: {
  # polling_time: 1,
@@ -15932,6 +16008,14 @@ module Aws::Glue
  # database: "EnclosedInStringProperty",
  # },
  # },
+ # direct_jdbc_source: {
+ # name: "NodeName", # required
+ # database: "EnclosedInStringProperty", # required
+ # table: "EnclosedInStringProperty", # required
+ # connection_name: "EnclosedInStringProperty", # required
+ # connection_type: "sqlserver", # required, accepts sqlserver, mysql, oracle, postgresql, redshift
+ # redshift_tmp_dir: "EnclosedInStringProperty",
+ # },
  # },
  # },
  # execution_class: "FLEX", # accepts FLEX, STANDARD
@@ -16757,7 +16841,7 @@ module Aws::Glue
  params: params,
  config: config)
  context[:gem_name] = 'aws-sdk-glue'
- context[:gem_version] = '1.129.0'
+ context[:gem_version] = '1.131.0'
  Seahorse::Client::Request.new(handlers, context)
  end
 
data/lib/aws-sdk-glue/client_api.rb CHANGED
@@ -373,6 +373,7 @@ module Aws::Glue
  DevEndpointList = Shapes::ListShape.new(name: 'DevEndpointList')
  DevEndpointNameList = Shapes::ListShape.new(name: 'DevEndpointNameList')
  DevEndpointNames = Shapes::ListShape.new(name: 'DevEndpointNames')
+ DirectJDBCSource = Shapes::StructureShape.new(name: 'DirectJDBCSource')
  DirectKafkaSource = Shapes::StructureShape.new(name: 'DirectKafkaSource')
  DirectKinesisSource = Shapes::StructureShape.new(name: 'DirectKinesisSource')
  DirectSchemaChangePolicy = Shapes::StructureShape.new(name: 'DirectSchemaChangePolicy')
@@ -604,6 +605,7 @@ module Aws::Glue
  InvalidInputException = Shapes::StructureShape.new(name: 'InvalidInputException')
  InvalidStateException = Shapes::StructureShape.new(name: 'InvalidStateException')
  IsVersionValid = Shapes::BooleanShape.new(name: 'IsVersionValid')
+ JDBCConnectionType = Shapes::StringShape.new(name: 'JDBCConnectionType')
  JDBCConnectorOptions = Shapes::StructureShape.new(name: 'JDBCConnectorOptions')
  JDBCConnectorSource = Shapes::StructureShape.new(name: 'JDBCConnectorSource')
  JDBCConnectorTarget = Shapes::StructureShape.new(name: 'JDBCConnectorTarget')
@@ -1562,6 +1564,7 @@ module Aws::Glue
  CodeGenConfigurationNode.add_member(:s3_hudi_source, Shapes::ShapeRef.new(shape: S3HudiSource, location_name: "S3HudiSource"))
  CodeGenConfigurationNode.add_member(:s3_hudi_catalog_target, Shapes::ShapeRef.new(shape: S3HudiCatalogTarget, location_name: "S3HudiCatalogTarget"))
  CodeGenConfigurationNode.add_member(:s3_hudi_direct_target, Shapes::ShapeRef.new(shape: S3HudiDirectTarget, location_name: "S3HudiDirectTarget"))
+ CodeGenConfigurationNode.add_member(:direct_jdbc_source, Shapes::ShapeRef.new(shape: DirectJDBCSource, location_name: "DirectJDBCSource"))
  CodeGenConfigurationNode.struct_class = Types::CodeGenConfigurationNode
 
  CodeGenConfigurationNodes.key = Shapes::ShapeRef.new(shape: NodeId)
@@ -2504,6 +2507,14 @@ module Aws::Glue
 
  DevEndpointNames.member = Shapes::ShapeRef.new(shape: GenericString)
 
+ DirectJDBCSource.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name"))
+ DirectJDBCSource.add_member(:database, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "Database"))
+ DirectJDBCSource.add_member(:table, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "Table"))
+ DirectJDBCSource.add_member(:connection_name, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "ConnectionName"))
+ DirectJDBCSource.add_member(:connection_type, Shapes::ShapeRef.new(shape: JDBCConnectionType, required: true, location_name: "ConnectionType"))
+ DirectJDBCSource.add_member(:redshift_tmp_dir, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: "RedshiftTmpDir"))
+ DirectJDBCSource.struct_class = Types::DirectJDBCSource
+
  DirectKafkaSource.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name"))
  DirectKafkaSource.add_member(:streaming_options, Shapes::ShapeRef.new(shape: KafkaStreamingSourceOptions, location_name: "StreamingOptions"))
  DirectKafkaSource.add_member(:window_size, Shapes::ShapeRef.new(shape: BoxedPositiveInt, location_name: "WindowSize", metadata: {"box"=>true}))
@@ -3587,6 +3598,9 @@ module Aws::Glue
  KafkaStreamingSourceOptions.add_member(:retry_interval_ms, Shapes::ShapeRef.new(shape: BoxedNonNegativeLong, location_name: "RetryIntervalMs"))
  KafkaStreamingSourceOptions.add_member(:max_offsets_per_trigger, Shapes::ShapeRef.new(shape: BoxedNonNegativeLong, location_name: "MaxOffsetsPerTrigger"))
  KafkaStreamingSourceOptions.add_member(:min_partitions, Shapes::ShapeRef.new(shape: BoxedNonNegativeInt, location_name: "MinPartitions"))
+ KafkaStreamingSourceOptions.add_member(:include_headers, Shapes::ShapeRef.new(shape: BoxedBoolean, location_name: "IncludeHeaders"))
+ KafkaStreamingSourceOptions.add_member(:add_record_timestamp, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: "AddRecordTimestamp"))
+ KafkaStreamingSourceOptions.add_member(:emit_consumer_lag_metrics, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: "EmitConsumerLagMetrics"))
  KafkaStreamingSourceOptions.struct_class = Types::KafkaStreamingSourceOptions
 
  KeyList.member = Shapes::ShapeRef.new(shape: NameString)
@@ -3615,6 +3629,8 @@ module Aws::Glue
  KinesisStreamingSourceOptions.add_member(:stream_arn, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: "StreamArn"))
  KinesisStreamingSourceOptions.add_member(:role_arn, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: "RoleArn"))
  KinesisStreamingSourceOptions.add_member(:role_session_name, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: "RoleSessionName"))
+ KinesisStreamingSourceOptions.add_member(:add_record_timestamp, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: "AddRecordTimestamp"))
+ KinesisStreamingSourceOptions.add_member(:emit_consumer_lag_metrics, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: "EmitConsumerLagMetrics"))
  KinesisStreamingSourceOptions.struct_class = Types::KinesisStreamingSourceOptions
 
  LabelingSetGenerationTaskRunProperties.add_member(:output_s3_path, Shapes::ShapeRef.new(shape: UriString, location_name: "OutputS3Path"))
data/lib/aws-sdk-glue/endpoint_parameters.rb CHANGED
@@ -50,9 +50,6 @@ module Aws::Glue
 
  def initialize(options = {})
  self[:region] = options[:region]
- if self[:region].nil?
- raise ArgumentError, "Missing required EndpointParameter: :region"
- end
  self[:use_dual_stack] = options[:use_dual_stack]
  self[:use_dual_stack] = false if self[:use_dual_stack].nil?
  if self[:use_dual_stack].nil?
data/lib/aws-sdk-glue/endpoint_provider.rb CHANGED
@@ -14,36 +14,39 @@ module Aws::Glue
  use_dual_stack = parameters.use_dual_stack
  use_fips = parameters.use_fips
  endpoint = parameters.endpoint
- if (partition_result = Aws::Endpoints::Matchers.aws_partition(region))
- if Aws::Endpoints::Matchers.set?(endpoint)
- if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true)
- raise ArgumentError, "Invalid Configuration: FIPS and custom endpoint are not supported"
- end
- if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true)
- raise ArgumentError, "Invalid Configuration: Dualstack and custom endpoint are not supported"
- end
- return Aws::Endpoints::Endpoint.new(url: endpoint, headers: {}, properties: {})
- end
- if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true)
- if Aws::Endpoints::Matchers.boolean_equals?(true, Aws::Endpoints::Matchers.attr(partition_result, "supportsFIPS")) && Aws::Endpoints::Matchers.boolean_equals?(true, Aws::Endpoints::Matchers.attr(partition_result, "supportsDualStack"))
- return Aws::Endpoints::Endpoint.new(url: "https://glue-fips.#{region}.#{partition_result['dualStackDnsSuffix']}", headers: {}, properties: {})
- end
- raise ArgumentError, "FIPS and DualStack are enabled, but this partition does not support one or both"
- end
+ if Aws::Endpoints::Matchers.set?(endpoint)
  if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true)
- if Aws::Endpoints::Matchers.boolean_equals?(true, Aws::Endpoints::Matchers.attr(partition_result, "supportsFIPS"))
- return Aws::Endpoints::Endpoint.new(url: "https://glue-fips.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {})
- end
- raise ArgumentError, "FIPS is enabled but this partition does not support FIPS"
+ raise ArgumentError, "Invalid Configuration: FIPS and custom endpoint are not supported"
  end
  if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true)
- if Aws::Endpoints::Matchers.boolean_equals?(true, Aws::Endpoints::Matchers.attr(partition_result, "supportsDualStack"))
- return Aws::Endpoints::Endpoint.new(url: "https://glue.#{region}.#{partition_result['dualStackDnsSuffix']}", headers: {}, properties: {})
+ raise ArgumentError, "Invalid Configuration: Dualstack and custom endpoint are not supported"
+ end
+ return Aws::Endpoints::Endpoint.new(url: endpoint, headers: {}, properties: {})
+ end
+ if Aws::Endpoints::Matchers.set?(region)
+ if (partition_result = Aws::Endpoints::Matchers.aws_partition(region))
+ if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true)
+ if Aws::Endpoints::Matchers.boolean_equals?(true, Aws::Endpoints::Matchers.attr(partition_result, "supportsFIPS")) && Aws::Endpoints::Matchers.boolean_equals?(true, Aws::Endpoints::Matchers.attr(partition_result, "supportsDualStack"))
+ return Aws::Endpoints::Endpoint.new(url: "https://glue-fips.#{region}.#{partition_result['dualStackDnsSuffix']}", headers: {}, properties: {})
+ end
+ raise ArgumentError, "FIPS and DualStack are enabled, but this partition does not support one or both"
+ end
+ if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true)
+ if Aws::Endpoints::Matchers.boolean_equals?(true, Aws::Endpoints::Matchers.attr(partition_result, "supportsFIPS"))
+ return Aws::Endpoints::Endpoint.new(url: "https://glue-fips.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {})
+ end
+ raise ArgumentError, "FIPS is enabled but this partition does not support FIPS"
+ end
+ if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true)
+ if Aws::Endpoints::Matchers.boolean_equals?(true, Aws::Endpoints::Matchers.attr(partition_result, "supportsDualStack"))
+ return Aws::Endpoints::Endpoint.new(url: "https://glue.#{region}.#{partition_result['dualStackDnsSuffix']}", headers: {}, properties: {})
+ end
+ raise ArgumentError, "DualStack is enabled but this partition does not support DualStack"
  end
- raise ArgumentError, "DualStack is enabled but this partition does not support DualStack"
+ return Aws::Endpoints::Endpoint.new(url: "https://glue.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {})
  end
- return Aws::Endpoints::Endpoint.new(url: "https://glue.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {})
  end
+ raise ArgumentError, "Invalid Configuration: Missing Region"
  raise ArgumentError, 'No endpoint could be resolved'
 
  end
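
The net effect of the rewrite above is that a custom :endpoint now short-circuits before any partition lookup, and a missing region is reported during endpoint resolution instead of when the parameters object is built. A hypothetical sketch of that second change, assuming the SDK's generated Aws::Glue::EndpointParameters and Aws::Glue::EndpointProvider classes:

require 'aws-sdk-glue'

# Previously this constructor raised "Missing required EndpointParameter: :region".
params = Aws::Glue::EndpointParameters.new(use_dual_stack: false, use_fips: false)

begin
  Aws::Glue::EndpointProvider.new.resolve_endpoint(params)
rescue ArgumentError => e
  puts e.message # => "Invalid Configuration: Missing Region"
end
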
data/lib/aws-sdk-glue/types.rb CHANGED
@@ -1702,12 +1702,11 @@ module Aws::Glue
  # @return [Types::S3ParquetSource]
  #
  # @!attribute [rw] relational_catalog_source
- # Specifies a Relational database data source in the Glue Data
- # Catalog.
+ # Specifies a relational catalog data store in the Glue Data Catalog.
  # @return [Types::RelationalCatalogSource]
  #
  # @!attribute [rw] dynamo_db_catalog_source
- # Specifies a DynamoDB data source in the Glue Data Catalog.
+ # Specifies a DynamoDBC Catalog data store in the Glue Data Catalog.
  # @return [Types::DynamoDBCatalogSource]
  #
  # @!attribute [rw] jdbc_connector_target
@@ -1931,6 +1930,10 @@ module Aws::Glue
  # Specifies a target that writes to a Hudi data source in Amazon S3.
  # @return [Types::S3HudiDirectTarget]
  #
+ # @!attribute [rw] direct_jdbc_source
+ # Specifies the direct JDBC source connection.
+ # @return [Types::DirectJDBCSource]
+ #
  # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CodeGenConfigurationNode AWS API Documentation
  #
  class CodeGenConfigurationNode < Struct.new(
@@ -1990,7 +1993,8 @@ module Aws::Glue
  :catalog_hudi_source,
  :s3_hudi_source,
  :s3_hudi_catalog_target,
- :s3_hudi_direct_target)
+ :s3_hudi_direct_target,
+ :direct_jdbc_source)
  SENSITIVE = []
  include Aws::Structure
  end
@@ -6832,6 +6836,45 @@ module Aws::Glue
  include Aws::Structure
  end
 
+ # Specifies the direct JDBC source connection.
+ #
+ # @!attribute [rw] name
+ # The name of the JDBC source connection.
+ # @return [String]
+ #
+ # @!attribute [rw] database
+ # The database of the JDBC source connection.
+ # @return [String]
+ #
+ # @!attribute [rw] table
+ # The table of the JDBC source connection.
+ # @return [String]
+ #
+ # @!attribute [rw] connection_name
+ # The connection name of the JDBC source.
+ # @return [String]
+ #
+ # @!attribute [rw] connection_type
+ # The connection type of the JDBC source.
+ # @return [String]
+ #
+ # @!attribute [rw] redshift_tmp_dir
+ # The temp directory of the JDBC Redshift source.
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DirectJDBCSource AWS API Documentation
+ #
+ class DirectJDBCSource < Struct.new(
+ :name,
+ :database,
+ :table,
+ :connection_name,
+ :connection_type,
+ :redshift_tmp_dir)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
  # Specifies an Apache Kafka data store.
  #
  # @!attribute [rw] name
@@ -12371,6 +12414,31 @@ module Aws::Glue
  # partitions is equal to the number of Kafka partitions.
  # @return [Integer]
  #
+ # @!attribute [rw] include_headers
+ # Whether to include the Kafka headers. When the option is set to
+ # "true", the data output will contain an additional column named
+ # "glue\_streaming\_kafka\_headers" with type `Array[Struct(key:
+ # String, value: String)]`. The default value is "false". This
+ # option is available in Glue version 3.0 or later only.
+ # @return [Boolean]
+ #
+ # @!attribute [rw] add_record_timestamp
+ # When this option is set to 'true', the data output will contain an
+ # additional column named "\_\_src\_timestamp" that indicates the
+ # time when the corresponding record received by the topic. The
+ # default value is 'false'. This option is supported in Glue version
+ # 4.0 or later.
+ # @return [String]
+ #
+ # @!attribute [rw] emit_consumer_lag_metrics
+ # When this option is set to 'true', for each batch, it will emit
+ # the metrics for the duration between the oldest record received by
+ # the topic and the time it arrives in Glue to CloudWatch. The
+ # metric's name is "glue.driver.streaming.maxConsumerLagInMs". The
+ # default value is 'false'. This option is supported in Glue version
+ # 4.0 or later.
+ # @return [String]
+ #
  # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/KafkaStreamingSourceOptions AWS API Documentation
  #
  class KafkaStreamingSourceOptions < Struct.new(
@@ -12388,7 +12456,10 @@ module Aws::Glue
  :num_retries,
  :retry_interval_ms,
  :max_offsets_per_trigger,
- :min_partitions)
+ :min_partitions,
+ :include_headers,
+ :add_record_timestamp,
+ :emit_consumer_lag_metrics)
  SENSITIVE = []
  include Aws::Structure
  end
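
As documented above, include_headers is a Boolean while add_record_timestamp and emit_consumer_lag_metrics take the strings "true"/"false". A hypothetical fragment of a direct_kafka_source node using the new options (the connection and topic names are placeholders):

kafka_node = {
  direct_kafka_source: {
    name: 'kafka-source',
    streaming_options: {
      connection_name: 'example-kafka-connection', # placeholder Glue connection
      topic_name: 'clickstream',                   # placeholder topic
      include_headers: true,                       # adds the glue_streaming_kafka_headers column (Glue 3.0+)
      add_record_timestamp: 'true',                # adds the __src_timestamp column (Glue 4.0+)
      emit_consumer_lag_metrics: 'true'            # emits glue.driver.streaming.maxConsumerLagInMs (Glue 4.0+)
    },
    window_size: 100,
    detect_schema: true
  }
}
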
@@ -12509,6 +12580,23 @@ module Aws::Glue
  # account. Used in conjunction with `"awsSTSRoleARN"`.
  # @return [String]
  #
+ # @!attribute [rw] add_record_timestamp
+ # When this option is set to 'true', the data output will contain an
+ # additional column named "\_\_src\_timestamp" that indicates the
+ # time when the corresponding record received by the stream. The
+ # default value is 'false'. This option is supported in Glue version
+ # 4.0 or later.
+ # @return [String]
+ #
+ # @!attribute [rw] emit_consumer_lag_metrics
+ # When this option is set to 'true', for each batch, it will emit
+ # the metrics for the duration between the oldest record received by
+ # the stream and the time it arrives in Glue to CloudWatch. The
+ # metric's name is "glue.driver.streaming.maxConsumerLagInMs". The
+ # default value is 'false'. This option is supported in Glue version
+ # 4.0 or later.
+ # @return [String]
+ #
  # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/KinesisStreamingSourceOptions AWS API Documentation
  #
  class KinesisStreamingSourceOptions < Struct.new(
@@ -12529,7 +12617,9 @@ module Aws::Glue
  :avoid_empty_batches,
  :stream_arn,
  :role_arn,
- :role_session_name)
+ :role_session_name,
+ :add_record_timestamp,
+ :emit_consumer_lag_metrics)
  SENSITIVE = []
  include Aws::Structure
  end
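
The same two string-valued options land on Kinesis sources. A hypothetical direct_kinesis_source fragment (the stream ARN is a placeholder):

kinesis_node = {
  direct_kinesis_source: {
    name: 'kinesis-source',
    streaming_options: {
      stream_arn: 'arn:aws:kinesis:us-east-1:123456789012:stream/example-stream', # placeholder
      starting_position: 'latest',
      add_record_timestamp: 'true',     # adds the __src_timestamp column (Glue 4.0+)
      emit_consumer_lag_metrics: 'true' # emits glue.driver.streaming.maxConsumerLagInMs (Glue 4.0+)
    },
    window_size: 100,
    detect_schema: true
  }
}
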
@@ -15889,6 +15979,7 @@ module Aws::Glue
  # @return [String]
  #
  # @!attribute [rw] additional_options
+ # Specifies additional connection options for the connector.
  # @return [Hash<String,String>]
  #
  # @!attribute [rw] schema_change_policy
@@ -15925,8 +16016,7 @@ module Aws::Glue
  # @return [Hash<String,String>]
  #
  # @!attribute [rw] additional_options
- # Specifies additional connection options for the Amazon S3 data
- # store.
+ # Specifies additional options for the connector.
  # @return [Types::S3DirectSourceAdditionalOptions]
  #
  # @!attribute [rw] output_schemas
data/lib/aws-sdk-glue.rb CHANGED
@@ -52,6 +52,6 @@ require_relative 'aws-sdk-glue/customizations'
  # @!group service
  module Aws::Glue
 
- GEM_VERSION = '1.129.0'
+ GEM_VERSION = '1.131.0'
 
  end
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: aws-sdk-glue
  version: !ruby/object:Gem::Version
- version: 1.129.0
+ version: 1.131.0
  platform: ruby
  authors:
  - Amazon Web Services
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2023-01-19 00:00:00.000000000 Z
+ date: 2023-02-15 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: aws-sdk-core