embulk-output-redshift 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml ADDED
@@ -0,0 +1,7 @@
+ ---
+ SHA1:
+   metadata.gz: 2824d8d4c60e62b49127ff8759a1c5e50b76e9bc
+   data.tar.gz: 6eccc67a5ac7a8971f5dd4e0a6fabc58d2ade100
+ SHA512:
+   metadata.gz: 0a6b84c2d7e0f6d9aca52604738a99e1d0ec6c60033f45f063f807766d93c05a704fc2bf945ade880352ba623d9b4f046de1b9f3b3ec50b32bef4c1dad573406
+   data.tar.gz: 326d1ed8ab544b6740aefa9cd1d7c4459976acfdd57c78b08b411541c36e6b1dbfe56d92a5f923936864df7564e5d7bb19cb4020307c74071a4e01b7a5bb1026
data/README.md ADDED
@@ -0,0 +1,47 @@
+ # Redshift output plugins for Embulk
+
+ Redshift output plugins for Embulk load records to Redshift.
+
+ ## Overview
+
+ * **Plugin type**: output
+ * **Load all or nothing**: depends on the mode:
+   * **insert**: no
+   * **replace**: yes
+ * **Resume supported**: no
+
+ ## Configuration
+
+ - **host**: database host name (string, required)
+ - **port**: database port number (integer, default: 5439)
+ - **user**: database login user name (string, required)
+ - **password**: database login password (string, default: "")
+ - **database**: destination database name (string, required)
+ - **schema**: destination schema name (string, default: "public")
+ - **table**: destination table name (string, required)
+ - **access_key_id**: AWS access key ID used to upload intermediate files to S3 (string, required)
+ - **secret_access_key**: AWS secret access key used to upload intermediate files to S3 (string, required)
+ - **s3_bucket**: S3 bucket name used as the intermediate store for COPY (string, required)
+ - **iam_user_name**: IAM user name used to issue temporary read credentials for COPY (string, required)
+ - **mode**: "replace" or "insert" (string, required)
+ - **batch_size**: size of a single batch insert (integer, default: 16777216)
+ - **options**: extra connection properties (hash, default: {})
+
+ ### Example
+
+ ```yaml
+ out:
+   type: redshift
+   host: myinstance.us-west-2.redshift.amazonaws.com
+   user: pg
+   password: ""
+   database: my_database
+   table: my_table
+   access_key_id: ABCXYZ123ABCXYZ123
+   secret_access_key: AbCxYz123aBcXyZ123
+   s3_bucket: my-redshift-transfer-bucket
+   iam_user_name: my-s3-read-only
+   mode: insert
+ ```
+
+ ### Build
+
+ ```
+ $ ./gradlew gem
+ ```
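The packaged README example above uses `mode: insert`. As a hedged sketch (not part of the packaged README; the credentials, bucket, and IAM user names below are placeholders), a `replace` mode configuration differs only in the `mode` value:

```yaml
out:
  type: redshift
  host: myinstance.us-west-2.redshift.amazonaws.com
  user: pg
  password: ""
  database: my_database
  table: my_table
  access_key_id: YOUR_ACCESS_KEY_ID          # placeholder
  secret_access_key: YOUR_SECRET_ACCESS_KEY  # placeholder
  s3_bucket: my-redshift-transfer-bucket     # intermediate gzip files for COPY are uploaded here
  iam_user_name: my-s3-read-only             # used to mint temporary read-only credentials for COPY
  mode: replace                              # target table is swapped in only after the whole load succeeds
```

With `replace`, records are loaded into an intermediate table and the target table is renamed into place at the end (see `replaceTable` in RedshiftOutputConnection.java below), which is why the Overview lists replace as load-all-or-nothing.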
data/build.gradle ADDED
@@ -0,0 +1,9 @@
+ dependencies {
+     compile project(':embulk-output-jdbc')
+     compile project(':embulk-output-postgresql')
+
+     compile "com.amazonaws:aws-java-sdk-s3:1.9.17"
+     compile "com.amazonaws:aws-java-sdk-sts:1.9.17"
+
+     testCompile project(':embulk-output-jdbc').sourceSets.test.output
+ }
Binary files ADDED (8 files; names not shown in this diff view)
data/lib/embulk/output/redshift.rb ADDED
@@ -0,0 +1,3 @@
+ Embulk::JavaPlugin.register_output(
+   :redshift, "org.embulk.output.RedshiftOutputPlugin",
+   File.expand_path('../../../../classpath', __FILE__))
data/src/main/java/org/embulk/output/RedshiftOutputPlugin.java ADDED
@@ -0,0 +1,92 @@
+ package org.embulk.output;
+
+ import java.util.Properties;
+ import java.io.IOException;
+ import java.sql.SQLException;
+ import org.slf4j.Logger;
+ import com.amazonaws.auth.AWSCredentials;
+ import com.amazonaws.auth.BasicAWSCredentials;
+ import org.embulk.spi.Exec;
+ import org.embulk.config.Config;
+ import org.embulk.output.jdbc.AbstractJdbcOutputPlugin;
+ import org.embulk.output.jdbc.BatchInsert;
+ import org.embulk.output.redshift.RedshiftOutputConnector;
+ import org.embulk.output.redshift.RedshiftCopyBatchInsert;
+
+ public class RedshiftOutputPlugin
+         extends AbstractJdbcOutputPlugin
+ {
+     private static final String DEFAULT_SCHEMA = "public";
+     private static final int DEFAULT_PORT = 5439;
+
+     private final Logger logger = Exec.getLogger(RedshiftOutputPlugin.class);
+
+     public interface RedshiftPluginTask extends PluginTask
+     {
+         @Config("access_key_id")
+         public String getAccessKeyId();
+
+         @Config("secret_access_key")
+         public String getSecretAccessKey();
+
+         @Config("iam_user_name")
+         public String getIamUserName();
+
+         @Config("s3_bucket")
+         public String getS3Bucket();
+     }
+
+     @Override
+     protected Class<? extends PluginTask> getTaskClass()
+     {
+         return RedshiftPluginTask.class;
+     }
+
+     @Override
+     protected RedshiftOutputConnector getConnector(PluginTask task, boolean retryableMetadataOperation)
+     {
+         String url = String.format("jdbc:postgresql://%s:%d/%s",
+                 task.getHost(), task.getPort().or(DEFAULT_PORT), task.getDatabase());
+
+         Properties props = new Properties();
+         props.setProperty("user", task.getUser());
+         props.setProperty("password", task.getPassword());
+         props.setProperty("loginTimeout", "300"); // seconds
+         props.setProperty("socketTimeout", "1800"); // seconds
+
+         // Enable keepalive based on tcp_keepalive_time, tcp_keepalive_intvl and tcp_keepalive_probes kernel parameters.
+         // Socket options TCP_KEEPCNT, TCP_KEEPIDLE, and TCP_KEEPINTVL are not configurable.
+         props.setProperty("tcpKeepAlive", "true");
+
+         // TODO
+         //switch task.getSssl() {
+         //when "disable":
+         //    break;
+         //when "enable":
+         //    props.setProperty("sslfactory", "org.postgresql.ssl.NonValidatingFactory"); // disable server-side validation
+         //when "verify":
+         //    props.setProperty("ssl", "true");
+         //    break;
+         //}
+
+         if (!retryableMetadataOperation) {
+             // non-retryable batch operation uses longer timeout
+             props.setProperty("loginTimeout", "300"); // seconds
+             props.setProperty("socketTimeout", "28800"); // seconds
+         }
+
+         props.putAll(task.getOptions());
+
+         return new RedshiftOutputConnector(url, props, task.getSchema().or(DEFAULT_SCHEMA));
+     }
+
+     @Override
+     protected BatchInsert newBatchInsert(PluginTask task) throws IOException, SQLException
+     {
+         RedshiftPluginTask rt = (RedshiftPluginTask) task;
+         AWSCredentials creds = new BasicAWSCredentials(
+                 rt.getAccessKeyId(), rt.getSecretAccessKey());
+         return new RedshiftCopyBatchInsert(getConnector(task, true),
+                 creds, rt.getS3Bucket(), rt.getIamUserName());
+     }
+ }
data/src/main/java/org/embulk/output/redshift/RedshiftCopyBatchInsert.java ADDED
@@ -0,0 +1,216 @@
+ package org.embulk.output.redshift;
+
+ import java.util.zip.GZIPOutputStream;
+ import java.util.concurrent.Callable;
+ import java.util.UUID;
+ import java.io.File;
+ import java.io.IOException;
+ import java.io.FileOutputStream;
+ import java.io.OutputStreamWriter;
+ import java.io.Closeable;
+ import java.io.Writer;
+ import java.io.BufferedWriter;
+ import java.sql.Connection;
+ import java.sql.SQLException;
+ import com.amazonaws.auth.AWSCredentials;
+ import com.amazonaws.auth.BasicSessionCredentials;
+ import com.amazonaws.auth.policy.Policy;
+ import com.amazonaws.auth.policy.Resource;
+ import com.amazonaws.auth.policy.Statement;
+ import com.amazonaws.auth.policy.Statement.Effect;
+ import com.amazonaws.auth.policy.actions.S3Actions;
+ import com.amazonaws.services.s3.AmazonS3Client;
+ import com.amazonaws.services.securitytoken.AWSSecurityTokenServiceClient;
+ import com.amazonaws.services.securitytoken.model.GetFederationTokenRequest;
+ import com.amazonaws.services.securitytoken.model.GetFederationTokenResult;
+ import com.amazonaws.services.securitytoken.model.Credentials;
+ import org.slf4j.Logger;
+ import org.embulk.spi.Exec;
+ import org.embulk.output.jdbc.JdbcSchema;
+ import org.embulk.output.postgresql.AbstractPostgreSQLCopyBatchInsert;
+
+ public class RedshiftCopyBatchInsert
+         extends AbstractPostgreSQLCopyBatchInsert
+ {
+     private final Logger logger = Exec.getLogger(RedshiftCopyBatchInsert.class);
+     private final RedshiftOutputConnector connector;
+     private final AWSCredentials awsCredentials;
+     private final String s3BucketName;
+     private final String iamReaderUserName;
+     private final AmazonS3Client s3;
+     private final AWSSecurityTokenServiceClient sts;
+
+     private RedshiftOutputConnection connection = null;
+     private String copySqlBeforeFrom = null;
+     private long totalRows;
+     private int fileCount;
+
+     public static final String COPY_AFTER_FROM = "GZIP DELIMITER '\\t' NULL '\\N' ESCAPE TRUNCATECOLUMNS ACCEPTINVCHARS STATUPDATE OFF COMPUPDATE OFF";
+
+     public RedshiftCopyBatchInsert(RedshiftOutputConnector connector,
+             AWSCredentials awsCredentials, String s3BucketName,
+             String iamReaderUserName) throws IOException, SQLException
+     {
+         super();
+         this.connector = connector;
+         this.awsCredentials = awsCredentials;
+         this.s3BucketName = s3BucketName;
+         this.iamReaderUserName = iamReaderUserName;
+         this.s3 = new AmazonS3Client(awsCredentials); // TODO options
+         this.sts = new AWSSecurityTokenServiceClient(awsCredentials); // options
+     }
+
+     @Override
+     public void prepare(String loadTable, JdbcSchema insertSchema) throws SQLException
+     {
+         this.connection = connector.connect(true);
+         this.copySqlBeforeFrom = connection.buildCopySQLBeforeFrom(loadTable, insertSchema);
+         logger.info("Copy SQL: "+copySqlBeforeFrom+" ? "+COPY_AFTER_FROM);
+     }
+
+     @Override
+     protected BufferedWriter openWriter(File newFile) throws IOException
+     {
+         // Redshift supports gzip
+         return new BufferedWriter(
+                 new OutputStreamWriter(
+                     new GZIPOutputStream(new FileOutputStream(newFile)),
+                     FILE_CHARSET)
+                 );
+     }
+
+     @Override
+     public void flush() throws IOException, SQLException
+     {
+         File file = closeCurrentFile(); // flush buffered data in writer
+
+         // TODO multi-threading
+         new UploadAndCopyTask(file, batchRows, UUID.randomUUID().toString()).call();
+         new DeleteFileFinalizer(file).close();
+
+         fileCount++;
+         totalRows += batchRows;
+         batchRows = 0;
+
+         openNewFile();
+         file.delete();
+     }
+
+     @Override
+     public void finish() throws IOException, SQLException
+     {
+         super.finish();
+         logger.info("Loaded {} files.", fileCount);
+     }
+
+     @Override
+     public void close() throws IOException, SQLException
+     {
+         s3.shutdown();
+         closeCurrentFile().delete();
+         if (connection != null) {
+             connection.close();
+             connection = null;
+         }
+     }
+
+     private BasicSessionCredentials generateReaderSessionCredentials(String s3KeyName)
+     {
+         Policy policy = new Policy()
+             .withStatements(
+                     new Statement(Effect.Allow)
+                         .withActions(S3Actions.ListObjects)
+                         .withResources(new Resource("arn:aws:s3:::"+s3BucketName)),
+                     new Statement(Effect.Allow)
+                         .withActions(S3Actions.GetObject)
+                         .withResources(new Resource("arn:aws:s3:::"+s3BucketName+"/"+s3KeyName)) // TODO encode file name using percent encoding
+                     );
+         GetFederationTokenRequest req = new GetFederationTokenRequest();
+         req.setDurationSeconds(86400); // 3600 - 129600
+         req.setName(iamReaderUserName);
+         req.setPolicy(policy.toJson());
+
+         GetFederationTokenResult res = sts.getFederationToken(req);
+         Credentials c = res.getCredentials();
+
+         return new BasicSessionCredentials(
+                 c.getAccessKeyId(),
+                 c.getSecretAccessKey(),
+                 c.getSessionToken());
+     }
+
+     private class UploadAndCopyTask implements Callable<Void>
+     {
+         private final File file;
+         private final int batchRows;
+         private final String s3KeyName;
+
+         public UploadAndCopyTask(File file, int batchRows, String s3KeyName)
+         {
+             this.file = file;
+             this.batchRows = batchRows;
+             this.s3KeyName = s3KeyName;
+         }
+
+         public Void call() throws SQLException {
+             logger.info(String.format("Uploading file id %s to S3 (%,d bytes %,d rows)",
+                         s3KeyName, file.length(), batchRows));
+             s3.putObject(s3BucketName, s3KeyName, file);
+
+             RedshiftOutputConnection con = connector.connect(true);
+             try {
+                 logger.info("Running COPY from file {}", s3KeyName);
+
+                 // create temporary credential right before COPY operation because
+                 // it has timeout.
+                 // TODO skip this step if iamReaderUserName is not set
+                 BasicSessionCredentials creds = generateReaderSessionCredentials(s3KeyName);
+
+                 long startTime = System.currentTimeMillis();
+                 con.runCopy(buildCopySQL(creds));
+                 double seconds = (System.currentTimeMillis() - startTime) / 1000.0;
+
+                 logger.info(String.format("Loaded file %s (%.2f seconds for COPY)", s3KeyName, seconds));
+
+             } finally {
+                 con.close();
+             }
+
+             return null;
+         }
+
+         private String buildCopySQL(BasicSessionCredentials creds)
+         {
+             StringBuilder sb = new StringBuilder();
+             sb.append(copySqlBeforeFrom);
+             sb.append(" FROM 's3://");
+             sb.append(s3BucketName);
+             sb.append("/");
+             sb.append(s3KeyName);
+             sb.append("' CREDENTIALS '");
+             sb.append("aws_access_key_id=");
+             sb.append(creds.getAWSAccessKeyId());
+             sb.append(";aws_secret_access_key=");
+             sb.append(creds.getAWSSecretKey());
+             sb.append(";token=");
+             sb.append(creds.getSessionToken());
+             sb.append("' ");
+             sb.append(COPY_AFTER_FROM);
+             return sb.toString();
+         }
+     }
+
+     private static class DeleteFileFinalizer implements Closeable
+     {
+         private File file;
+
+         public DeleteFileFinalizer(File file) {
+             this.file = file;
+         }
+
+         @Override
+         public void close() throws IOException {
+             file.delete();
+         }
+     }
+ }
data/src/main/java/org/embulk/output/redshift/RedshiftOutputConnection.java ADDED
@@ -0,0 +1,123 @@
+ package org.embulk.output.redshift;
+
+ import java.sql.Connection;
+ import java.sql.SQLException;
+ import java.sql.Statement;
+ import org.slf4j.Logger;
+ import org.embulk.spi.Exec;
+ import org.embulk.output.jdbc.JdbcOutputConnection;
+ import org.embulk.output.jdbc.JdbcColumn;
+ import org.embulk.output.jdbc.JdbcSchema;
+
+ public class RedshiftOutputConnection
+         extends JdbcOutputConnection
+ {
+     private final Logger logger = Exec.getLogger(RedshiftOutputConnection.class);
+
+     public RedshiftOutputConnection(Connection connection, String schemaName, boolean autoCommit)
+             throws SQLException
+     {
+         super(connection, schemaName);
+         connection.setAutoCommit(autoCommit);
+     }
+
+     // Redshift does not support DROP TABLE IF EXISTS.
+     // Here runs DROP TABLE and ignores errors.
+     @Override
+     public void dropTableIfExists(String tableName) throws SQLException
+     {
+         Statement stmt = connection.createStatement();
+         try {
+             String sql = String.format("DROP TABLE IF EXISTS %s", quoteIdentifierString(tableName));
+             executeUpdate(stmt, sql);
+             connection.commit();
+         } catch (SQLException ex) {
+             // ignore errors.
+             // TODO here should ignore only 'table "XXX" does not exist' errors.
+             connection.rollback();
+         } finally {
+             stmt.close();
+         }
+     }
+
+     // Redshift does not support DROP TABLE IF EXISTS.
+     // Dropping part runs DROP TABLE and ignores errors.
+     @Override
+     public void replaceTable(String fromTable, JdbcSchema schema, String toTable) throws SQLException
+     {
+         Statement stmt = connection.createStatement();
+         try {
+             try {
+                 StringBuilder sb = new StringBuilder();
+                 sb.append("DROP TABLE ");
+                 quoteIdentifierString(sb, toTable);
+                 String sql = sb.toString();
+                 executeUpdate(stmt, sql);
+             } catch (SQLException ex) {
+                 // ignore errors.
+                 // TODO here should ignore only 'table "XXX" does not exist' errors.
+                 // rollback or commit is required to recover failed transaction
+                 connection.rollback();
+             }
+
+             {
+                 StringBuilder sb = new StringBuilder();
+                 sb.append("ALTER TABLE ");
+                 quoteIdentifierString(sb, fromTable);
+                 sb.append(" RENAME TO ");
+                 quoteIdentifierString(sb, toTable);
+                 String sql = sb.toString();
+                 executeUpdate(stmt, sql);
+             }
+
+             connection.commit();
+         } catch (SQLException ex) {
+             connection.rollback();
+             throw ex;
+         } finally {
+             stmt.close();
+         }
+     }
+
+     @Override
+     protected String convertTypeName(String typeName)
+     {
+         // Redshift does not support TEXT type.
+         switch(typeName) {
+         case "CLOB":
+             return "VARCHAR(65535)";
+         case "TEXT":
+             return "VARCHAR(65535)";
+         case "BLOB":
+             return "BYTEA";
+         default:
+             return typeName;
+         }
+     }
+
+     public String buildCopySQLBeforeFrom(String tableName, JdbcSchema tableSchema)
+     {
+         StringBuilder sb = new StringBuilder();
+
+         sb.append("COPY ");
+         quoteIdentifierString(sb, tableName);
+         sb.append(" (");
+         for(int i=0; i < tableSchema.getCount(); i++) {
+             if(i != 0) { sb.append(", "); }
+             quoteIdentifierString(sb, tableSchema.getColumnName(i));
+         }
+         sb.append(")");
+
+         return sb.toString();
+     }
+
+     public void runCopy(String sql) throws SQLException
+     {
+         Statement stmt = connection.createStatement();
+         try {
+             stmt.executeUpdate(sql);
+         } finally {
+             stmt.close();
+         }
+     }
+ }
data/src/main/java/org/embulk/output/redshift/RedshiftOutputConnector.java ADDED
@@ -0,0 +1,40 @@
+ package org.embulk.output.redshift;
+
+ import java.util.Properties;
+ import java.sql.Driver;
+ import java.sql.Connection;
+ import java.sql.SQLException;
+ import org.embulk.output.jdbc.JdbcOutputConnector;
+ import org.embulk.output.jdbc.JdbcOutputConnection;
+
+ public class RedshiftOutputConnector
+         implements JdbcOutputConnector
+ {
+     private static final Driver driver = new org.postgresql.Driver();
+
+     private final String url;
+     private final Properties properties;
+     private final String schemaName;
+
+     public RedshiftOutputConnector(String url, Properties properties, String schemaName)
+     {
+         this.url = url;
+         this.properties = properties;
+         this.schemaName = schemaName;
+     }
+
+     @Override
+     public RedshiftOutputConnection connect(boolean autoCommit) throws SQLException
+     {
+         Connection c = driver.connect(url, properties);
+         try {
+             RedshiftOutputConnection con = new RedshiftOutputConnection(c, schemaName, autoCommit);
+             c = null;
+             return con;
+         } finally {
+             if (c != null) {
+                 c.close();
+             }
+         }
+     }
+ }
metadata ADDED
@@ -0,0 +1,67 @@
+ --- !ruby/object:Gem::Specification
+ name: embulk-output-redshift
+ version: !ruby/object:Gem::Version
+   version: 0.1.0
+ platform: ruby
+ authors:
+ - FURUHASHI Sadayuki
+ autorequire:
+ bindir: bin
+ cert_chain: []
+ date: 2015-02-16 00:00:00.000000000 Z
+ dependencies: []
+ description: JDBC output plugin is an Embulk plugin that loads records to JDBC read by any input plugins. Search the input plugins by "embulk-input" keyword.
+ email:
+ - frsyuki@users.sourceforge.jp
+ executables: []
+ extensions: []
+ extra_rdoc_files: []
+ files:
+ - README.md
+ - build.gradle
+ - lib/embulk/output/redshift.rb
+ - src/main/java/org/embulk/output/RedshiftOutputPlugin.java
+ - src/main/java/org/embulk/output/redshift/RedshiftCopyBatchInsert.java
+ - src/main/java/org/embulk/output/redshift/RedshiftOutputConnection.java
+ - src/main/java/org/embulk/output/redshift/RedshiftOutputConnector.java
+ - classpath/aws-java-sdk-core-1.9.17.jar
+ - classpath/aws-java-sdk-kms-1.9.17.jar
+ - classpath/aws-java-sdk-s3-1.9.17.jar
+ - classpath/aws-java-sdk-sts-1.9.17.jar
+ - classpath/commons-codec-1.6.jar
+ - classpath/commons-logging-1.1.3.jar
+ - classpath/embulk-output-jdbc-0.1.0.jar
+ - classpath/embulk-output-postgresql-0.1.0.jar
+ - classpath/embulk-output-redshift-0.1.0.jar
+ - classpath/httpclient-4.3.4.jar
+ - classpath/httpcore-4.3.2.jar
+ - classpath/jna-4.1.0.jar
+ - classpath/jna-platform-4.1.0.jar
+ - classpath/postgresql-9.4-1200-jdbc41.jar
+ - classpath/slf4j-simple-1.7.7.jar
+ - classpath/waffle-jna-1.7.jar
+ homepage: https://github.com/embulk/embulk-output-jdbc
+ licenses:
+ - Apache 2.0
+ metadata: {}
+ post_install_message:
+ rdoc_options: []
+ require_paths:
+ - lib
+ required_ruby_version: !ruby/object:Gem::Requirement
+   requirements:
+   - - '>='
+     - !ruby/object:Gem::Version
+       version: '0'
+ required_rubygems_version: !ruby/object:Gem::Requirement
+   requirements:
+   - - '>='
+     - !ruby/object:Gem::Version
+       version: '0'
+ requirements: []
+ rubyforge_project:
+ rubygems_version: 2.1.9
+ signing_key:
+ specification_version: 4
+ summary: JDBC output plugin for Embulk
+ test_files: []