embulk-output-redshift 0.4.0 → 0.4.1

This diff shows the changes between publicly released versions of the package, as published to its public registry. It is provided for informational purposes only.
@@ -1,214 +1,218 @@
- package org.embulk.output.redshift;
-
- import java.util.zip.GZIPOutputStream;
- import java.util.concurrent.Callable;
- import java.util.UUID;
- import java.io.File;
- import java.io.IOException;
- import java.io.FileOutputStream;
- import java.io.OutputStreamWriter;
- import java.io.Closeable;
- import java.io.Writer;
- import java.io.BufferedWriter;
- import java.sql.Connection;
- import java.sql.SQLException;
- import com.amazonaws.auth.AWSCredentialsProvider;
- import com.amazonaws.auth.BasicSessionCredentials;
- import com.amazonaws.auth.policy.Policy;
- import com.amazonaws.auth.policy.Resource;
- import com.amazonaws.auth.policy.Statement;
- import com.amazonaws.auth.policy.Statement.Effect;
- import com.amazonaws.auth.policy.actions.S3Actions;
- import com.amazonaws.services.s3.AmazonS3Client;
- import com.amazonaws.services.securitytoken.AWSSecurityTokenServiceClient;
- import com.amazonaws.services.securitytoken.model.GetFederationTokenRequest;
- import com.amazonaws.services.securitytoken.model.GetFederationTokenResult;
- import com.amazonaws.services.securitytoken.model.Credentials;
- import org.slf4j.Logger;
- import org.embulk.spi.Exec;
- import org.embulk.output.jdbc.JdbcSchema;
- import org.embulk.output.postgresql.AbstractPostgreSQLCopyBatchInsert;
-
- public class RedshiftCopyBatchInsert
-         extends AbstractPostgreSQLCopyBatchInsert
- {
-     private final Logger logger = Exec.getLogger(RedshiftCopyBatchInsert.class);
-     private final RedshiftOutputConnector connector;
-     private final String s3BucketName;
-     private final String iamReaderUserName;
-     private final AmazonS3Client s3;
-     private final AWSSecurityTokenServiceClient sts;
-
-     private RedshiftOutputConnection connection = null;
-     private String copySqlBeforeFrom = null;
-     private long totalRows;
-     private int fileCount;
-
-     public static final String COPY_AFTER_FROM = "GZIP DELIMITER '\\t' NULL '\\\\N' ESCAPE TRUNCATECOLUMNS ACCEPTINVCHARS STATUPDATE OFF COMPUPDATE OFF";
-
-     public RedshiftCopyBatchInsert(RedshiftOutputConnector connector,
-             AWSCredentialsProvider credentialsProvider, String s3BucketName,
-             String iamReaderUserName) throws IOException, SQLException
-     {
-         super();
-         this.connector = connector;
-         this.s3BucketName = s3BucketName;
-         this.iamReaderUserName = iamReaderUserName;
-         this.s3 = new AmazonS3Client(credentialsProvider); // TODO options
-         this.sts = new AWSSecurityTokenServiceClient(credentialsProvider); // options
-     }
-
-     @Override
-     public void prepare(String loadTable, JdbcSchema insertSchema) throws SQLException
-     {
-         this.connection = connector.connect(true);
-         this.copySqlBeforeFrom = connection.buildCopySQLBeforeFrom(loadTable, insertSchema);
-         logger.info("Copy SQL: "+copySqlBeforeFrom+" ? "+COPY_AFTER_FROM);
-     }
-
-     @Override
-     protected BufferedWriter openWriter(File newFile) throws IOException
-     {
-         // Redshift supports gzip
-         return new BufferedWriter(
-                 new OutputStreamWriter(
-                     new GZIPOutputStream(new FileOutputStream(newFile)),
-                     FILE_CHARSET)
-                 );
-     }
-
-     @Override
-     public void flush() throws IOException, SQLException
-     {
-         File file = closeCurrentFile(); // flush buffered data in writer
-
-         // TODO multi-threading
-         new UploadAndCopyTask(file, batchRows, UUID.randomUUID().toString()).call();
-         new DeleteFileFinalizer(file).close();
-
-         fileCount++;
-         totalRows += batchRows;
-         batchRows = 0;
-
-         openNewFile();
-         file.delete();
-     }
-
-     @Override
-     public void finish() throws IOException, SQLException
-     {
-         super.finish();
-         logger.info("Loaded {} files.", fileCount);
-     }
-
-     @Override
-     public void close() throws IOException, SQLException
-     {
-         s3.shutdown();
-         closeCurrentFile().delete();
-         if (connection != null) {
-             connection.close();
-             connection = null;
-         }
-     }
-
-     private BasicSessionCredentials generateReaderSessionCredentials(String s3KeyName)
-     {
-         Policy policy = new Policy()
-             .withStatements(
-                     new Statement(Effect.Allow)
-                         .withActions(S3Actions.ListObjects)
-                         .withResources(new Resource("arn:aws:s3:::"+s3BucketName)),
-                     new Statement(Effect.Allow)
-                         .withActions(S3Actions.GetObject)
-                         .withResources(new Resource("arn:aws:s3:::"+s3BucketName+"/"+s3KeyName)) // TODO encode file name using percent encoding
-                     );
-         GetFederationTokenRequest req = new GetFederationTokenRequest();
-         req.setDurationSeconds(86400); // 3600 - 129600
-         req.setName(iamReaderUserName);
-         req.setPolicy(policy.toJson());
-
-         GetFederationTokenResult res = sts.getFederationToken(req);
-         Credentials c = res.getCredentials();
-
-         return new BasicSessionCredentials(
-                 c.getAccessKeyId(),
-                 c.getSecretAccessKey(),
-                 c.getSessionToken());
-     }
-
-     private class UploadAndCopyTask implements Callable<Void>
-     {
-         private final File file;
-         private final int batchRows;
-         private final String s3KeyName;
-
-         public UploadAndCopyTask(File file, int batchRows, String s3KeyName)
-         {
-             this.file = file;
-             this.batchRows = batchRows;
-             this.s3KeyName = s3KeyName;
-         }
-
-         public Void call() throws SQLException {
-             logger.info(String.format("Uploading file id %s to S3 (%,d bytes %,d rows)",
-                         s3KeyName, file.length(), batchRows));
-             s3.putObject(s3BucketName, s3KeyName, file);
-
-             RedshiftOutputConnection con = connector.connect(true);
-             try {
-                 logger.info("Running COPY from file {}", s3KeyName);
-
-                 // create temporary credential right before COPY operation because
-                 // it has timeout.
-                 // TODO skip this step if iamReaderUserName is not set
-                 BasicSessionCredentials creds = generateReaderSessionCredentials(s3KeyName);
-
-                 long startTime = System.currentTimeMillis();
-                 con.runCopy(buildCopySQL(creds));
-                 double seconds = (System.currentTimeMillis() - startTime) / 1000.0;
-
-                 logger.info(String.format("Loaded file %s (%.2f seconds for COPY)", s3KeyName, seconds));
-
-             } finally {
-                 con.close();
-             }
-
-             return null;
-         }
-
-         private String buildCopySQL(BasicSessionCredentials creds)
-         {
-             StringBuilder sb = new StringBuilder();
-             sb.append(copySqlBeforeFrom);
-             sb.append(" FROM 's3://");
-             sb.append(s3BucketName);
-             sb.append("/");
-             sb.append(s3KeyName);
-             sb.append("' CREDENTIALS '");
-             sb.append("aws_access_key_id=");
-             sb.append(creds.getAWSAccessKeyId());
-             sb.append(";aws_secret_access_key=");
-             sb.append(creds.getAWSSecretKey());
-             sb.append(";token=");
-             sb.append(creds.getSessionToken());
-             sb.append("' ");
-             sb.append(COPY_AFTER_FROM);
-             return sb.toString();
-         }
-     }
-
-     private static class DeleteFileFinalizer implements Closeable
-     {
-         private File file;
-
-         public DeleteFileFinalizer(File file) {
-             this.file = file;
-         }
-
-         @Override
-         public void close() throws IOException {
-             file.delete();
-         }
-     }
- }
+ package org.embulk.output.redshift;
+
+ import java.util.zip.GZIPOutputStream;
+ import java.util.concurrent.Callable;
+ import java.util.UUID;
+ import java.io.File;
+ import java.io.IOException;
+ import java.io.FileOutputStream;
+ import java.io.OutputStreamWriter;
+ import java.io.Closeable;
+ import java.io.BufferedWriter;
+ import java.sql.SQLException;
+ import com.amazonaws.auth.AWSCredentialsProvider;
+ import com.amazonaws.auth.BasicSessionCredentials;
+ import com.amazonaws.auth.policy.Policy;
+ import com.amazonaws.auth.policy.Resource;
+ import com.amazonaws.auth.policy.Statement;
+ import com.amazonaws.auth.policy.Statement.Effect;
+ import com.amazonaws.auth.policy.actions.S3Actions;
+ import com.amazonaws.services.s3.AmazonS3Client;
+ import com.amazonaws.services.securitytoken.AWSSecurityTokenServiceClient;
+ import com.amazonaws.services.securitytoken.model.GetFederationTokenRequest;
+ import com.amazonaws.services.securitytoken.model.GetFederationTokenResult;
+ import com.amazonaws.services.securitytoken.model.Credentials;
+ import org.slf4j.Logger;
+ import org.embulk.spi.Exec;
+ import org.embulk.output.jdbc.JdbcSchema;
+ import org.embulk.output.postgresql.AbstractPostgreSQLCopyBatchInsert;
+
+ public class RedshiftCopyBatchInsert
+         extends AbstractPostgreSQLCopyBatchInsert
+ {
+     private final Logger logger = Exec.getLogger(RedshiftCopyBatchInsert.class);
+     private final RedshiftOutputConnector connector;
+     private final String s3BucketName;
+     private final String s3KeyPrefix;
+     private final String iamReaderUserName;
+     private final AmazonS3Client s3;
+     private final AWSSecurityTokenServiceClient sts;
+
+     private RedshiftOutputConnection connection = null;
+     private String copySqlBeforeFrom = null;
+     private long totalRows;
+     private int fileCount;
+
+     public static final String COPY_AFTER_FROM = "GZIP DELIMITER '\\t' NULL '\\\\N' ESCAPE TRUNCATECOLUMNS ACCEPTINVCHARS STATUPDATE OFF COMPUPDATE OFF";
+
+     public RedshiftCopyBatchInsert(RedshiftOutputConnector connector,
+             AWSCredentialsProvider credentialsProvider, String s3BucketName, String s3KeyPrefix,
+             String iamReaderUserName) throws IOException, SQLException
+     {
+         super();
+         this.connector = connector;
+         this.s3BucketName = s3BucketName;
+         if (s3KeyPrefix.isEmpty() || s3KeyPrefix.endsWith("/")) {
+             this.s3KeyPrefix = s3KeyPrefix;
+         } else {
+             this.s3KeyPrefix = s3KeyPrefix + "/";
+         }
+         this.iamReaderUserName = iamReaderUserName;
+         this.s3 = new AmazonS3Client(credentialsProvider); // TODO options
+         this.sts = new AWSSecurityTokenServiceClient(credentialsProvider); // options
+     }
+
+     @Override
+     public void prepare(String loadTable, JdbcSchema insertSchema) throws SQLException
+     {
+         this.connection = connector.connect(true);
+         this.copySqlBeforeFrom = connection.buildCopySQLBeforeFrom(loadTable, insertSchema);
+         logger.info("Copy SQL: "+copySqlBeforeFrom+" ? "+COPY_AFTER_FROM);
+     }
+
+     @Override
+     protected BufferedWriter openWriter(File newFile) throws IOException
+     {
+         // Redshift supports gzip
+         return new BufferedWriter(
+                 new OutputStreamWriter(
+                     new GZIPOutputStream(new FileOutputStream(newFile)),
+                     FILE_CHARSET)
+                 );
+     }
+
+     @Override
+     public void flush() throws IOException, SQLException
+     {
+         File file = closeCurrentFile(); // flush buffered data in writer
+
+         // TODO multi-threading
+         new UploadAndCopyTask(file, batchRows, s3KeyPrefix + UUID.randomUUID().toString()).call();
+         new DeleteFileFinalizer(file).close();
+
+         fileCount++;
+         totalRows += batchRows;
+         batchRows = 0;
+
+         openNewFile();
+         file.delete();
+     }
+
+     @Override
+     public void finish() throws IOException, SQLException
+     {
+         super.finish();
+         logger.info("Loaded {} files.", fileCount);
+     }
+
+     @Override
+     public void close() throws IOException, SQLException
+     {
+         s3.shutdown();
+         closeCurrentFile().delete();
+         if (connection != null) {
+             connection.close();
+             connection = null;
+         }
+     }
+
+     private BasicSessionCredentials generateReaderSessionCredentials(String s3KeyName)
+     {
+         Policy policy = new Policy()
+             .withStatements(
+                     new Statement(Effect.Allow)
+                         .withActions(S3Actions.ListObjects)
+                         .withResources(new Resource("arn:aws:s3:::"+s3BucketName)),
+                     new Statement(Effect.Allow)
+                         .withActions(S3Actions.GetObject)
+                         .withResources(new Resource("arn:aws:s3:::"+s3BucketName+"/"+s3KeyName)) // TODO encode file name using percent encoding
+                     );
+         GetFederationTokenRequest req = new GetFederationTokenRequest();
+         req.setDurationSeconds(86400); // 3600 - 129600
+         req.setName(iamReaderUserName);
+         req.setPolicy(policy.toJson());
+
+         GetFederationTokenResult res = sts.getFederationToken(req);
+         Credentials c = res.getCredentials();
+
+         return new BasicSessionCredentials(
+                 c.getAccessKeyId(),
+                 c.getSecretAccessKey(),
+                 c.getSessionToken());
+     }
+
+     private class UploadAndCopyTask implements Callable<Void>
+     {
+         private final File file;
+         private final int batchRows;
+         private final String s3KeyName;
+
+         public UploadAndCopyTask(File file, int batchRows, String s3KeyName)
+         {
+             this.file = file;
+             this.batchRows = batchRows;
+             this.s3KeyName = s3KeyName;
+         }
+
+         public Void call() throws SQLException {
+             logger.info(String.format("Uploading file id %s to S3 (%,d bytes %,d rows)",
+                         s3KeyName, file.length(), batchRows));
+             s3.putObject(s3BucketName, s3KeyName, file);
+
+             RedshiftOutputConnection con = connector.connect(true);
+             try {
+                 logger.info("Running COPY from file {}", s3KeyName);
+
+                 // create temporary credential right before COPY operation because
+                 // it has timeout.
+                 // TODO skip this step if iamReaderUserName is not set
+                 BasicSessionCredentials creds = generateReaderSessionCredentials(s3KeyName);
+
+                 long startTime = System.currentTimeMillis();
+                 con.runCopy(buildCopySQL(creds));
+                 double seconds = (System.currentTimeMillis() - startTime) / 1000.0;
+
+                 logger.info(String.format("Loaded file %s (%.2f seconds for COPY)", s3KeyName, seconds));
+
+             } finally {
+                 con.close();
+             }
+
+             return null;
+         }
+
+         private String buildCopySQL(BasicSessionCredentials creds)
+         {
+             StringBuilder sb = new StringBuilder();
+             sb.append(copySqlBeforeFrom);
+             sb.append(" FROM 's3://");
+             sb.append(s3BucketName);
+             sb.append("/");
+             sb.append(s3KeyName);
+             sb.append("' CREDENTIALS '");
+             sb.append("aws_access_key_id=");
+             sb.append(creds.getAWSAccessKeyId());
+             sb.append(";aws_secret_access_key=");
+             sb.append(creds.getAWSSecretKey());
+             sb.append(";token=");
+             sb.append(creds.getSessionToken());
+             sb.append("' ");
+             sb.append(COPY_AFTER_FROM);
+             return sb.toString();
+         }
+     }
+
+     private static class DeleteFileFinalizer implements Closeable
+     {
+         private File file;
+
+         public DeleteFileFinalizer(File file) {
+             this.file = file;
+         }
+
+         @Override
+         public void close() throws IOException {
+             file.delete();
+         }
+     }
+ }
@@ -1,122 +1,122 @@
- package org.embulk.output.redshift;
-
- import java.sql.Connection;
- import java.sql.SQLException;
- import java.sql.Statement;
- import org.slf4j.Logger;
- import org.embulk.spi.Exec;
- import org.embulk.output.jdbc.JdbcOutputConnection;
- import org.embulk.output.jdbc.JdbcColumn;
- import org.embulk.output.jdbc.JdbcSchema;
-
- public class RedshiftOutputConnection
-         extends JdbcOutputConnection
- {
-     private final Logger logger = Exec.getLogger(RedshiftOutputConnection.class);
-
-     public RedshiftOutputConnection(Connection connection, String schemaName, boolean autoCommit)
-             throws SQLException
-     {
-         super(connection, schemaName);
-         connection.setAutoCommit(autoCommit);
-     }
-
-     // Redshift does not support DROP TABLE IF EXISTS.
-     // Here runs DROP TABLE and ignores errors.
-     @Override
-     public void dropTableIfExists(String tableName) throws SQLException
-     {
-         Statement stmt = connection.createStatement();
-         try {
-             String sql = String.format("DROP TABLE IF EXISTS %s", quoteIdentifierString(tableName));
-             executeUpdate(stmt, sql);
-             commitIfNecessary(connection);
-         } catch (SQLException ex) {
-             // ignore errors.
-             // TODO here should ignore only 'table "XXX" does not exist' errors.
-             SQLException ignored = safeRollback(connection, ex);
-         } finally {
-             stmt.close();
-         }
-     }
-
-     // Redshift does not support DROP TABLE IF EXISTS.
-     // Dropping part runs DROP TABLE and ignores errors.
-     @Override
-     public void replaceTable(String fromTable, JdbcSchema schema, String toTable) throws SQLException
-     {
-         Statement stmt = connection.createStatement();
-         try {
-             try {
-                 StringBuilder sb = new StringBuilder();
-                 sb.append("DROP TABLE ");
-                 quoteIdentifierString(sb, toTable);
-                 String sql = sb.toString();
-                 executeUpdate(stmt, sql);
-             } catch (SQLException ex) {
-                 // ignore errors.
-                 // TODO here should ignore only 'table "XXX" does not exist' errors.
-                 // rollback or comimt is required to recover failed transaction
-                 SQLException ignored = safeRollback(connection, ex);
-             }
-
-             {
-                 StringBuilder sb = new StringBuilder();
-                 sb.append("ALTER TABLE ");
-                 quoteIdentifierString(sb, fromTable);
-                 sb.append(" RENAME TO ");
-                 quoteIdentifierString(sb, toTable);
-                 String sql = sb.toString();
-                 executeUpdate(stmt, sql);
-             }
-
-             commitIfNecessary(connection);
-         } catch (SQLException ex) {
-             throw safeRollback(connection, ex);
-         } finally {
-             stmt.close();
-         }
-     }
-
-     @Override
-     protected String buildColumnTypeName(JdbcColumn c)
-     {
-         // Redshift does not support TEXT type.
-         switch(c.getSimpleTypeName()) {
-         case "CLOB":
-             return "VARCHAR(65535)";
-         case "TEXT":
-             return "VARCHAR(65535)";
-         case "BLOB":
-             return "BYTEA";
-         default:
-             return super.buildColumnTypeName(c);
-         }
-     }
-
-     public String buildCopySQLBeforeFrom(String tableName, JdbcSchema tableSchema)
-     {
-         StringBuilder sb = new StringBuilder();
-
-         sb.append("COPY ");
-         quoteIdentifierString(sb, tableName);
-         sb.append(" (");
-         for(int i=0; i < tableSchema.getCount(); i++) {
-             if(i != 0) { sb.append(", "); }
-             quoteIdentifierString(sb, tableSchema.getColumnName(i));
-         }
-         sb.append(")");
-
-         return sb.toString();
-     }
-
-     public void runCopy(String sql) throws SQLException
-     {
-         Statement stmt = connection.createStatement();
-         try {
-             stmt.executeUpdate(sql);
-         } finally {
-             stmt.close();
-         }
-     }
- }
+ package org.embulk.output.redshift;
+
+ import java.sql.Connection;
+ import java.sql.SQLException;
+ import java.sql.Statement;
+ import org.slf4j.Logger;
+ import org.embulk.spi.Exec;
+ import org.embulk.output.jdbc.JdbcOutputConnection;
+ import org.embulk.output.jdbc.JdbcColumn;
+ import org.embulk.output.jdbc.JdbcSchema;
+
+ public class RedshiftOutputConnection
+         extends JdbcOutputConnection
+ {
+     private final Logger logger = Exec.getLogger(RedshiftOutputConnection.class);
+
+     public RedshiftOutputConnection(Connection connection, String schemaName, boolean autoCommit)
+             throws SQLException
+     {
+         super(connection, schemaName);
+         connection.setAutoCommit(autoCommit);
+     }
+
+     // Redshift does not support DROP TABLE IF EXISTS.
+     // Here runs DROP TABLE and ignores errors.
+     @Override
+     public void dropTableIfExists(String tableName) throws SQLException
+     {
+         Statement stmt = connection.createStatement();
+         try {
+             String sql = String.format("DROP TABLE IF EXISTS %s", quoteIdentifierString(tableName));
+             executeUpdate(stmt, sql);
+             commitIfNecessary(connection);
+         } catch (SQLException ex) {
+             // ignore errors.
+             // TODO here should ignore only 'table "XXX" does not exist' errors.
+             SQLException ignored = safeRollback(connection, ex);
+         } finally {
+             stmt.close();
+         }
+     }
+
+     // Redshift does not support DROP TABLE IF EXISTS.
+     // Dropping part runs DROP TABLE and ignores errors.
+     @Override
+     public void replaceTable(String fromTable, JdbcSchema schema, String toTable) throws SQLException
+     {
+         Statement stmt = connection.createStatement();
+         try {
+             try {
+                 StringBuilder sb = new StringBuilder();
+                 sb.append("DROP TABLE ");
+                 quoteIdentifierString(sb, toTable);
+                 String sql = sb.toString();
+                 executeUpdate(stmt, sql);
+             } catch (SQLException ex) {
+                 // ignore errors.
+                 // TODO here should ignore only 'table "XXX" does not exist' errors.
+                 // rollback or comimt is required to recover failed transaction
+                 SQLException ignored = safeRollback(connection, ex);
+             }
+
+             {
+                 StringBuilder sb = new StringBuilder();
+                 sb.append("ALTER TABLE ");
+                 quoteIdentifierString(sb, fromTable);
+                 sb.append(" RENAME TO ");
+                 quoteIdentifierString(sb, toTable);
+                 String sql = sb.toString();
+                 executeUpdate(stmt, sql);
+             }
+
+             commitIfNecessary(connection);
+         } catch (SQLException ex) {
+             throw safeRollback(connection, ex);
+         } finally {
+             stmt.close();
+         }
+     }
+
+     @Override
+     protected String buildColumnTypeName(JdbcColumn c)
+     {
+         // Redshift does not support TEXT type.
+         switch(c.getSimpleTypeName()) {
+         case "CLOB":
+             return "VARCHAR(65535)";
+         case "TEXT":
+             return "VARCHAR(65535)";
+         case "BLOB":
+             return "BYTEA";
+         default:
+             return super.buildColumnTypeName(c);
+         }
+     }
+
+     public String buildCopySQLBeforeFrom(String tableName, JdbcSchema tableSchema)
+     {
+         StringBuilder sb = new StringBuilder();
+
+         sb.append("COPY ");
+         quoteIdentifierString(sb, tableName);
+         sb.append(" (");
+         for(int i=0; i < tableSchema.getCount(); i++) {
+             if(i != 0) { sb.append(", "); }
+             quoteIdentifierString(sb, tableSchema.getColumnName(i));
+         }
+         sb.append(")");
+
+         return sb.toString();
+     }
+
+     public void runCopy(String sql) throws SQLException
+     {
+         Statement stmt = connection.createStatement();
+         try {
+             stmt.executeUpdate(sql);
+         } finally {
+             stmt.close();
+         }
+     }
+ }