embulk-output-redshift 0.7.8 → 0.7.9
This diff covers the content of publicly available package versions released to one of the supported registries; it is provided for informational purposes only and reflects the changes between those versions as they appear in their public registries.
- checksums.yaml +4 -4
- data/README.md +1 -0
- data/classpath/embulk-output-jdbc-0.7.9.jar +0 -0
- data/classpath/embulk-output-postgresql-0.7.9.jar +0 -0
- data/classpath/embulk-output-redshift-0.7.9.jar +0 -0
- data/src/main/java/org/embulk/output/RedshiftOutputPlugin.java +16 -1
- data/src/main/java/org/embulk/output/redshift/RedshiftCopyBatchInsert.java +2 -1
- data/src/main/java/org/embulk/output/redshift/RedshiftOutputConnection.java +21 -17
- metadata +5 -5
- data/classpath/embulk-output-jdbc-0.7.8.jar +0 -0
- data/classpath/embulk-output-postgresql-0.7.8.jar +0 -0
- data/classpath/embulk-output-redshift-0.7.8.jar +0 -0
checksums.yaml
CHANGED

```diff
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 36ac2e6be278272eb350e37f52c533a7db3da42c
+  data.tar.gz: dca6057f28230550806b00e716ec128be9df17ca
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 43141c1afaabca1dede02262d2b796ed073b38bf9a2124fbc7d30aed5a50a23cb7ec092ca9f1d15717e0d2f4d267a7144fdf879c72ce738a5cde9ce26f02a25e
+  data.tar.gz: 912553f1443a79805b90f9a57c4ed01e7ddc1115eef21259ed96b76ac8129c3b91b0bc29fe5b71f840f4756a9c0548b445cbcd7d791c1593ccc6307dea1a429b
```
data/README.md
CHANGED

```diff
@@ -17,6 +17,7 @@ Redshift output plugin for Embulk loads records to Redshift.
 - **password**: database login password (string, default: "")
 - **database**: destination database name (string, required)
 - **schema**: destination schema name (string, default: "public")
+- **temp_schema**: schema name for intermediate tables. by default, intermediate tables will be created in the schema specified by `schema`. replace mode doesn't support temp_schema. (string, optional)
 - **table**: destination table name (string, required)
 - **access_key_id**: deprecated. `aws_access_key_id` should be used (see "basic" in `aws_auth_method`).
 - **secret_access_key**: deprecated. `aws_secret_access_key` should be used (see "basic" in `aws_auth_method`).
```
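For reference, the new `temp_schema` option sits alongside the existing options in the `out:` section of an Embulk config. A minimal sketch, assuming the plugin's usual connection and S3 staging options; every value below is a placeholder and is not taken from this diff:

```yaml
out:
  type: redshift
  host: examplecluster.abc123xyz.us-east-1.redshift.amazonaws.com  # placeholder endpoint
  user: myuser
  password: mypassword
  database: mydb
  schema: public            # final destination schema
  temp_schema: embulk_temp  # intermediate tables are created here instead of `schema`
  table: sample_table
  mode: merge               # replace mode doesn't support temp_schema
  aws_auth_method: basic
  aws_access_key_id: AKIAXXXXXXXXXXXXXXXX
  aws_secret_access_key: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
  iam_user_name: my-iam-user
  s3_bucket: my-redshift-load-bucket
  s3_key_prefix: temp/redshift
```

Replace mode keeps its intermediate table in the destination schema because the final `ALTER TABLE ... RENAME TO` cannot move a table across schemas, which is what the `buildIntermediateTableId` override shown below enforces.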
data/classpath/embulk-output-jdbc-0.7.9.jar
Binary file

data/classpath/embulk-output-postgresql-0.7.9.jar
Binary file

data/classpath/embulk-output-redshift-0.7.9.jar
Binary file
data/src/main/java/org/embulk/output/RedshiftOutputPlugin.java
CHANGED

```diff
@@ -4,7 +4,6 @@ import java.util.Properties;
 import java.io.IOException;
 import java.sql.SQLException;
 
-import org.embulk.output.jdbc.MergeConfig;
 import org.slf4j.Logger;
 import com.google.common.base.Optional;
 import com.google.common.collect.ImmutableSet;
@@ -17,6 +16,8 @@ import org.embulk.config.ConfigDefault;
 import org.embulk.output.jdbc.AbstractJdbcOutputPlugin;
 import org.embulk.output.jdbc.BatchInsert;
 import org.embulk.output.jdbc.JdbcOutputConnection;
+import org.embulk.output.jdbc.MergeConfig;
+import org.embulk.output.jdbc.TableIdentifier;
 import org.embulk.output.redshift.RedshiftOutputConnector;
 import org.embulk.output.redshift.RedshiftCopyBatchInsert;
 import org.embulk.output.redshift.Ssl;
@@ -49,6 +50,10 @@ public class RedshiftOutputPlugin
         @ConfigDefault("\"public\"")
         public String getSchema();
 
+        @Config("temp_schema")
+        @ConfigDefault("null")
+        public Optional<String> getTempSchema();
+
         // for backward compatibility
         @Config("access_key_id")
         @ConfigDefault("null")
@@ -153,6 +158,16 @@ public class RedshiftOutputPlugin
         }
     }
 
+    @Override
+    protected TableIdentifier buildIntermediateTableId(JdbcOutputConnection con, PluginTask task, String tableName) {
+        RedshiftPluginTask t = (RedshiftPluginTask) task;
+        // replace mode doesn't support temp_schema because ALTER TABLE cannot change schema of table
+        if (t.getTempSchema().isPresent() && t.getMode() != Mode.REPLACE) {
+            return new TableIdentifier(null, t.getTempSchema().get(), tableName);
+        }
+        return super.buildIntermediateTableId(con, task, tableName);
+    }
+
     @Override
     protected String generateIntermediateTableNamePrefix(String baseTableName, JdbcOutputConnection con,
             int suffixLength, int maxLength, LengthSemantics lengthSemantics) throws SQLException {
```
data/src/main/java/org/embulk/output/redshift/RedshiftCopyBatchInsert.java
CHANGED

```diff
@@ -18,6 +18,7 @@ import java.util.concurrent.TimeUnit;
 import java.util.zip.GZIPOutputStream;
 
 import org.embulk.output.jdbc.JdbcSchema;
+import org.embulk.output.jdbc.TableIdentifier;
 import org.embulk.output.postgresql.AbstractPostgreSQLCopyBatchInsert;
 import org.embulk.spi.Exec;
 import org.slf4j.Logger;
@@ -93,7 +94,7 @@ public class RedshiftCopyBatchInsert
     }
 
     @Override
-    public void prepare(
+    public void prepare(TableIdentifier loadTable, JdbcSchema insertSchema) throws SQLException
     {
         this.connection = connector.connect(true);
         this.copySqlBeforeFrom = connection.buildCopySQLBeforeFrom(loadTable, insertSchema);
```
data/src/main/java/org/embulk/output/redshift/RedshiftOutputConnection.java
CHANGED

```diff
@@ -10,6 +10,7 @@ import org.embulk.output.jdbc.JdbcColumn;
 import org.embulk.output.jdbc.JdbcOutputConnection;
 import org.embulk.output.jdbc.JdbcSchema;
 import org.embulk.output.jdbc.MergeConfig;
+import org.embulk.output.jdbc.TableIdentifier;
 import org.embulk.spi.Exec;
 import org.slf4j.Logger;
 
@@ -30,11 +31,11 @@ public class RedshiftOutputConnection
     // Redshift does not support DROP TABLE IF EXISTS.
     // Here runs DROP TABLE and ignores errors.
     @Override
-    public void dropTableIfExists(
+    public void dropTableIfExists(TableIdentifier table) throws SQLException
     {
         Statement stmt = connection.createStatement();
         try {
-            String sql = String.format("DROP TABLE IF EXISTS %s",
+            String sql = String.format("DROP TABLE IF EXISTS %s", quoteTableIdentifier(table));
             executeUpdate(stmt, sql);
             commitIfNecessary(connection);
         } catch (SQLException ex) {
@@ -49,14 +50,14 @@ public class RedshiftOutputConnection
     // Redshift does not support DROP TABLE IF EXISTS.
     // Dropping part runs DROP TABLE and ignores errors.
     @Override
-    public void replaceTable(
+    public void replaceTable(TableIdentifier fromTable, JdbcSchema schema, TableIdentifier toTable, Optional<String> additionalSql) throws SQLException
     {
         Statement stmt = connection.createStatement();
         try {
             try {
                 StringBuilder sb = new StringBuilder();
                 sb.append("DROP TABLE ");
-
+                quoteTableIdentifier(sb, toTable);
                 String sql = sb.toString();
                 executeUpdate(stmt, sql);
             } catch (SQLException ex) {
@@ -67,11 +68,12 @@ public class RedshiftOutputConnection
             }
 
             {
+                // ALTER TABLE cannot change schema of table
                 StringBuilder sb = new StringBuilder();
                 sb.append("ALTER TABLE ");
-
+                quoteTableIdentifier(sb, fromTable);
                 sb.append(" RENAME TO ");
-                quoteIdentifierString(sb, toTable);
+                quoteIdentifierString(sb, toTable.getTableName());
                 String sql = sb.toString();
                 executeUpdate(stmt, sql);
             }
@@ -104,12 +106,12 @@ public class RedshiftOutputConnection
         }
     }
 
-    public String buildCopySQLBeforeFrom(
+    public String buildCopySQLBeforeFrom(TableIdentifier table, JdbcSchema tableSchema)
     {
         StringBuilder sb = new StringBuilder();
 
         sb.append("COPY ");
-
+        quoteTableIdentifier(sb, table);
         sb.append(" (");
         for(int i=0; i < tableSchema.getCount(); i++) {
             if(i != 0) { sb.append(", "); }
@@ -131,7 +133,7 @@ public class RedshiftOutputConnection
     }
 
     @Override
-    protected String buildCollectMergeSql(List<
+    protected String buildCollectMergeSql(List<TableIdentifier> fromTables, JdbcSchema schema, TableIdentifier toTable, MergeConfig mergeConfig) throws SQLException
     {
         StringBuilder sb = new StringBuilder();
 
@@ -148,7 +150,7 @@ public class RedshiftOutputConnection
         sb.append("BEGIN TRANSACTION;");
 
         sb.append("UPDATE ");
-
+        quoteTableIdentifier(sb, toTable);
         sb.append(" SET ");
         for (int i = 0; i < updateKeys.size(); i++) {
             if (i != 0) { sb.append(", "); }
@@ -166,7 +168,7 @@ public class RedshiftOutputConnection
                 quoteIdentifierString(sb, schema.getColumnName(j));
             }
             sb.append(" FROM ");
-
+            quoteTableIdentifier(sb, fromTables.get(i));
         }
         sb.append(" ) S WHERE ");
 
@@ -175,14 +177,14 @@ public class RedshiftOutputConnection
             sb.append("S.");
             quoteIdentifierString(sb, mergeKeys.get(i));
             sb.append(" = ");
-
+            quoteTableIdentifier(sb, toTable);
             sb.append(".");
             quoteIdentifierString(sb, mergeKeys.get(i));
         }
         sb.append(";");
 
         sb.append("INSERT INTO ");
-
+        quoteTableIdentifier(sb, toTable);
         sb.append(" (");
         for (int i = 0; i < schema.getCount(); i++) {
             if (i != 0) { sb.append(", "); }
@@ -197,18 +199,20 @@ public class RedshiftOutputConnection
                 quoteIdentifierString(sb, schema.getColumnName(j));
             }
             sb.append(" FROM ");
-
+            quoteTableIdentifier(sb, fromTables.get(i));
             sb.append(" WHERE NOT EXISTS (SELECT 1 FROM ");
-
+            quoteTableIdentifier(sb, toTable);
+            sb.append(", ");
+            quoteTableIdentifier(sb, fromTables.get(i));
             sb.append(" WHERE ");
 
             for (int k = 0; k < mergeKeys.size(); k++) {
                 if (k != 0) { sb.append(" AND "); }
-
+                quoteTableIdentifier(sb, fromTables.get(i));
                 sb.append(".");
                 quoteIdentifierString(sb, mergeKeys.get(k));
                 sb.append(" = ");
-
+                quoteTableIdentifier(sb, toTable);
                 sb.append(".");
                 quoteIdentifierString(sb, mergeKeys.get(k));
             }
```
metadata
CHANGED

```diff
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: embulk-output-redshift
 version: !ruby/object:Gem::Version
-  version: 0.7.8
+  version: 0.7.9
 platform: ruby
 authors:
 - Sadayuki Furuhashi
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2017-
+date: 2017-06-23 00:00:00.000000000 Z
 dependencies: []
 description: Inserts or updates records to a table.
 email:
@@ -26,9 +26,9 @@ files:
 - classpath/commons-codec-1.6.jar
 - classpath/commons-logging-1.1.3.jar
 - classpath/embulk-core-0.8.9.jar
-- classpath/embulk-output-jdbc-0.7.8.jar
-- classpath/embulk-output-postgresql-0.7.8.jar
-- classpath/embulk-output-redshift-0.7.8.jar
+- classpath/embulk-output-jdbc-0.7.9.jar
+- classpath/embulk-output-postgresql-0.7.9.jar
+- classpath/embulk-output-redshift-0.7.9.jar
 - classpath/embulk-util-aws-credentials-0.2.8.jar
 - classpath/httpclient-4.3.6.jar
 - classpath/httpcore-4.3.3.jar
```
data/classpath/embulk-output-jdbc-0.7.8.jar
Binary file

data/classpath/embulk-output-postgresql-0.7.8.jar
Binary file

data/classpath/embulk-output-redshift-0.7.8.jar
Binary file