embulk-output-jdbc 0.7.0 → 0.7.1

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
- metadata.gz: 1e7b878973e6e97c5149c469886b1d11c9efc8d9
- data.tar.gz: 3af9509130f455d733b78de7d0805907d259bdc2
+ metadata.gz: 3f912491ce43b8714fa207c9751ce0732f1d860f
+ data.tar.gz: 09361f2b0088b3df654a741018d99c7ee3256f05
  SHA512:
- metadata.gz: 586210422b7503a9921dcf879ab13b2fd6691521cddb5afb37ef51349c844173f6ac4b6506b048e8f01d08f4f172724341b4e6a40097c8c508b8f17119248659
- data.tar.gz: cf113f39192e5ec7dbba13c33947c1095dcf03c1297a0fbfebc251c219725a4a8ffc0580551d2001ff45cf3ea865e62faa5af133a5baa2dae70f7cd109d4ce84
+ metadata.gz: 51aa4687726314754747e3513c3316182caf5bae198a9267699057b17cabe68ec3020451f804c0f9fa8b83d79d425c9ab7b3ea8689dd1c117819eca538c90dbe
+ data.tar.gz: 8e37f62b9742d79b59a4637666bbcf9443fe8ea4fb92d4aa328e235c1ea0c30b764cb2e48124a865537852547f4474c32e3e9d181f306716c5fb7cdda694e13b
data/README.md CHANGED
@@ -30,6 +30,7 @@ Generic JDBC output plugin for Embulk loads records to a database using a JDBC d
  - **value_type**: This plugin converts input column type (embulk type) into a database type to build a INSERT statement. This value_type option controls the type of the value in a INSERT statement. (string, default: depends on the sql type of the column. Available values options are: `byte`, `short`, `int`, `long`, `double`, `float`, `boolean`, `string`, `nstring`, `date`, `time`, `timestamp`, `decimal`, `json`, `null`, `pass`)
  - **timestamp_format**: If input column type (embulk type) is timestamp and value_type is `string` or `nstring`, this plugin needs to format the timestamp value into a string. This timestamp_format option is used to control the format of the timestamp. (string, default: `%Y-%m-%d %H:%M:%S.%6N`)
  - **timezone**: If input column type (embulk type) is timestamp, this plugin needs to format the timestamp value into a SQL string. In this cases, this timezone option is used to control the timezone. (string, value of default_timezone option is used by default)
+ - **after_load**: if set, this SQL will be executed after loading all records.
 
  ## Modes
 
src/main/java/org/embulk/output/jdbc/AbstractJdbcOutputPlugin.java CHANGED
@@ -108,6 +108,10 @@ public abstract class AbstractJdbcOutputPlugin
  @ConfigDefault("null")
  public Optional<List<String>> getMergeRule();
 
+ @Config("after_load")
+ @ConfigDefault("null")
+ public Optional<String> getAfterLoad();
+
  public void setActualTable(String actualTable);
  public String getActualTable();
 
@@ -394,7 +398,7 @@ public abstract class AbstractJdbcOutputPlugin
  private ConfigDiff commit(final PluginTask task,
  Schema schema, final int taskCount)
  {
- if (!task.getMode().isDirectModify()) { // no intermediate data if isDirectModify == true
+ if (!task.getMode().isDirectModify() || task.getAfterLoad().isPresent()) { // no intermediate data if isDirectModify == true
  try {
  withRetry(task, new IdempotentSqlRunnable() {
  public void run() throws SQLException
@@ -492,7 +496,7 @@ public abstract class AbstractJdbcOutputPlugin
  if (mode.tempTablePerTask()) {
  String namePrefix = generateIntermediateTableNamePrefix(task.getActualTable(), con, 3,
  task.getFeatures().getMaxTableNameLength(), task.getFeatures().getTableNameLengthSemantics());
- for (int i=0; i < taskCount; i++) {
+ for (int i = 0; i < taskCount; i++) {
  intermTableNames.add(namePrefix + String.format("%03d", i));
  }
  } else {
@@ -666,16 +670,15 @@ public abstract class AbstractJdbcOutputPlugin
  protected void doCommit(JdbcOutputConnection con, PluginTask task, int taskCount)
  throws SQLException
  {
- if (task.getIntermediateTables().get().isEmpty()) {
- return;
- }
-
  JdbcSchema schema = filterSkipColumns(task.getTargetTableSchema());
 
  switch (task.getMode()) {
  case INSERT_DIRECT:
  case MERGE_DIRECT:
  // already done
+ if (task.getAfterLoad().isPresent()) {
+ con.executeSql(task.getAfterLoad().get());
+ }
  break;
 
  case INSERT:
@@ -683,7 +686,7 @@ public abstract class AbstractJdbcOutputPlugin
  if (task.getNewTableSchema().isPresent()) {
  con.createTableIfNotExists(task.getActualTable(), task.getNewTableSchema().get());
  }
- con.collectInsert(task.getIntermediateTables().get(), schema, task.getActualTable(), false);
+ con.collectInsert(task.getIntermediateTables().get(), schema, task.getActualTable(), false, task.getAfterLoad());
  break;
 
  case TRUNCATE_INSERT:
@@ -691,7 +694,7 @@ public abstract class AbstractJdbcOutputPlugin
  if (task.getNewTableSchema().isPresent()) {
  con.createTableIfNotExists(task.getActualTable(), task.getNewTableSchema().get());
  }
- con.collectInsert(task.getIntermediateTables().get(), schema, task.getActualTable(), true);
+ con.collectInsert(task.getIntermediateTables().get(), schema, task.getActualTable(), true, task.getAfterLoad());
  break;
 
  case MERGE:
@@ -699,12 +702,13 @@ public abstract class AbstractJdbcOutputPlugin
  if (task.getNewTableSchema().isPresent()) {
  con.createTableIfNotExists(task.getActualTable(), task.getNewTableSchema().get());
  }
- con.collectMerge(task.getIntermediateTables().get(), schema, task.getActualTable(), new MergeConfig(task.getMergeKeys().get(), task.getMergeRule()));
+ con.collectMerge(task.getIntermediateTables().get(), schema, task.getActualTable(),
+ new MergeConfig(task.getMergeKeys().get(), task.getMergeRule()), task.getAfterLoad());
  break;
 
  case REPLACE:
  // swap table
- con.replaceTable(task.getIntermediateTables().get().get(0), schema, task.getActualTable());
+ con.replaceTable(task.getIntermediateTables().get().get(0), schema, task.getActualTable(), task.getAfterLoad());
  break;
  }
  }
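Taken together with the new README option, the switch above shows where `after_load` runs: the direct-modify modes execute it via `executeSql` in `doCommit`, while the other modes pass it into `collectInsert`, `collectMerge`, or `replaceTable`. A minimal sketch of an output configuration using it follows; everything other than `after_load` is a pre-existing option of this plugin, and the driver, URL, table, and SQL text are illustrative placeholders, not values taken from this release.

```yaml
out:
  type: jdbc
  driver_path: /opt/jdbc/my-driver.jar      # illustrative path
  driver_class: com.example.jdbc.Driver     # illustrative driver class
  url: jdbc:example://db.example.com/mydb   # illustrative JDBC URL
  user: myuser
  password: mypassword
  table: my_table
  mode: insert
  # new in 0.7.1: executed once after all records have been loaded
  after_load: "ANALYZE my_table"
```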
src/main/java/org/embulk/output/jdbc/JdbcOutputConnection.java CHANGED
@@ -280,9 +280,26 @@ public class JdbcOutputConnection
  throw new UnsupportedOperationException("not implemented");
  }
 
+ protected void executeSql(String sql) throws SQLException
+ {
+ Statement stmt = connection.createStatement();
+ try {
+ executeUpdate(stmt, sql);
+ commitIfNecessary(connection);
+ } catch (SQLException ex) {
+ throw safeRollback(connection, ex);
+ } finally {
+ stmt.close();
+ }
+ }
+
  protected void collectInsert(List<String> fromTables, JdbcSchema schema, String toTable,
- boolean truncateDestinationFirst) throws SQLException
+ boolean truncateDestinationFirst, Optional<String> additionalSql) throws SQLException
  {
+ if (fromTables.isEmpty()) {
+ return;
+ }
+
  Statement stmt = connection.createStatement();
  try {
  if (truncateDestinationFirst) {
@@ -291,6 +308,9 @@ public class JdbcOutputConnection
  }
  String sql = buildCollectInsertSql(fromTables, schema, toTable);
  executeUpdate(stmt, sql);
+ if (additionalSql.isPresent()) {
+ executeUpdate(stmt, additionalSql.get());
+ }
  commitIfNecessary(connection);
  } catch (SQLException ex) {
  throw safeRollback(connection, ex);
@@ -335,12 +355,20 @@ public class JdbcOutputConnection
  return sb.toString();
  }
 
- protected void collectMerge(List<String> fromTables, JdbcSchema schema, String toTable, MergeConfig mergeConfig) throws SQLException
+ protected void collectMerge(List<String> fromTables, JdbcSchema schema, String toTable, MergeConfig mergeConfig,
+ Optional<String> additionalSql) throws SQLException
  {
+ if (fromTables.isEmpty()) {
+ return;
+ }
+
  Statement stmt = connection.createStatement();
  try {
  String sql = buildCollectMergeSql(fromTables, schema, toTable, mergeConfig);
  executeUpdate(stmt, sql);
+ if (additionalSql.isPresent()) {
+ executeUpdate(stmt, additionalSql.get());
+ }
  commitIfNecessary(connection);
  } catch (SQLException ex) {
  throw safeRollback(connection, ex);
@@ -354,14 +382,15 @@ public class JdbcOutputConnection
  throw new UnsupportedOperationException("not implemented");
  }
 
- public void replaceTable(String fromTable, JdbcSchema schema, String toTable) throws SQLException
+ public void replaceTable(String fromTable, JdbcSchema schema, String toTable, Optional<String> additionalSql) throws SQLException
  {
  Statement stmt = connection.createStatement();
  try {
  dropTableIfExists(stmt, toTable);
-
  executeUpdate(stmt, buildRenameTableSql(fromTable, toTable));
-
+ if (additionalSql.isPresent()) {
+ executeUpdate(stmt, additionalSql.get());
+ }
  commitIfNecessary(connection);
  } catch (SQLException ex) {
  throw safeRollback(connection, ex);
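Because `replaceTable` now executes the optional SQL after the rename, one plausible use in `replace` mode is restoring grants that the swapped-in table would otherwise lack. This is an assumption about usage, not something documented in this release; the table and role names are illustrative.

```yaml
out:
  type: jdbc
  # connection options as in the earlier sketch
  table: my_table
  mode: replace
  # runs after the intermediate table is renamed to my_table
  after_load: "GRANT SELECT ON my_table TO reporting_role"
```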
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: embulk-output-jdbc
  version: !ruby/object:Gem::Version
- version: 0.7.0
+ version: 0.7.1
  platform: ruby
  authors:
  - Sadayuki Furuhashi
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2016-10-26 00:00:00.000000000 Z
+ date: 2016-11-25 00:00:00.000000000 Z
  dependencies: []
  description: Inserts or updates records to a table.
  email:
@@ -19,7 +19,7 @@ extra_rdoc_files: []
  files:
  - README.md
  - build.gradle
- - classpath/embulk-output-jdbc-0.7.0.jar
+ - classpath/embulk-output-jdbc-0.7.1.jar
  - lib/embulk/output/jdbc.rb
  - src/main/java/org/embulk/output/JdbcOutputPlugin.java
  - src/main/java/org/embulk/output/jdbc/AbstractJdbcOutputPlugin.java