embulk-output-elasticsearch 0.2.0 → 0.2.1

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
- metadata.gz: 17a82e00d22fd47a3f0921903a8b2210c0ef938c
- data.tar.gz: accef942b8a09e8700477fc74883693702eaae0e
+ metadata.gz: 775d2c8e4628c2c21f2cfcf707ed138fbc2ab55b
+ data.tar.gz: 7956e4ddf4ec168484598f50f3a31f8305cf7780
  SHA512:
- metadata.gz: 7cee36d9a5099ce50c92fdf760777e1a9e6cc960a9d543486d8aad10b0244a97e87e1396e8e653759f16a44ae77a5d9cb66b8be1682221d2b1337b900156c8c9
- data.tar.gz: 75288939178b00e2d612f640c07fee9f0b86b21d1b5d17e4b15c93b2be97cb32e616a042302bf653d5aef09ce94ad790a481d6a952240d3d7700f5c10a29c1d2
+ metadata.gz: f4283bed7a28688b10d1577d0f601c9102f02b49a0d2c79df25a4a5e3ff8b83fa03300cbd6f8a251f8c65da287df6ec2ae6e81a3797958477ae86d1f37af028b
+ data.tar.gz: 4db5b4ca8f467f0c3ae77ed6fdeeea579fca25252885eea99d4fce818c61bad3a8fe7659f7fea29cdd0e1f90836ea39197a3a0f561364fbd1bd5652f1bb0bee5
data/CHANGELOG.md CHANGED
@@ -1,3 +1,7 @@
+ ## 0.2.1 - 2016-02-05
+
+ * [maintenance] Fix bug. Force to fail jobs if nodes down while executing [#19](https://github.com/muga/embulk-output-elasticsearch/pull/19)
+
  ## 0.2.0 - 2016-01-26
 
  * [new feature] Support Elasticsearch 2.x [#12](https://github.com/muga/embulk-output-elasticsearch/pull/12)
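The gist of the maintenance fix noted above, as the hunks further down show, is that the plugin now wraps `NoNodeAvailableException` in a new `ConnectionException`, a `RuntimeException` implementing Embulk's `UserDataException`, so a dead cluster makes the job fail instead of only producing a warning from the bulk listener. The following is a minimal, self-contained sketch of that pattern; `ConnectionFailureSketch`, the `UserData` marker interface, and the stand-in exception classes are illustrative stand-ins, not the plugin's actual code.

```java
// Sketch only: mirrors the ConnectionException pattern introduced in 0.2.1.
public class ConnectionFailureSketch {
    // Stand-in for org.embulk.config.UserDataException, so this sketch
    // compiles without the Embulk or Elasticsearch jars on the classpath.
    interface UserData {}

    // Mirrors the ConnectionException added in 0.2.1: a RuntimeException
    // that also carries the "user data" marker.
    static class ConnectionException extends RuntimeException implements UserData {
        ConnectionException(Throwable cause) { super(cause); }
    }

    // Stand-in for org.elasticsearch.client.transport.NoNodeAvailableException.
    static class NoNodeAvailableException extends RuntimeException {
        NoNodeAvailableException(String message) { super(message); }
    }

    // Roughly the shape of the new afterBulk() handler: a node outage is
    // escalated so the job fails; any other failure is only logged.
    static void afterBulk(Throwable failure) {
        System.err.println("Got the error during bulk processing: " + failure);
        if (failure.getClass() == NoNodeAvailableException.class) {
            throw new ConnectionException(failure);
        }
    }

    public static void main(String[] args) {
        try {
            afterBulk(new NoNodeAvailableException("no Elasticsearch node reachable"));
        } catch (ConnectionException e) {
            System.err.println("job fails: " + e.getCause().getMessage());
        }
    }
}
```

Per the changelog entry, the intended effect is that a job is forced to fail when nodes go down during execution rather than finishing with only warnings.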
data/build.gradle CHANGED
@@ -14,7 +14,7 @@ configurations {
  provided
  }
 
- version = "0.2.0"
+ version = "0.2.1"
 
  compileJava.options.encoding = 'UTF-8' // source encoding
  sourceCompatibility = 1.7
@@ -20,6 +20,7 @@ import org.elasticsearch.action.bulk.BulkResponse;
  import org.elasticsearch.action.index.IndexRequest;
  import org.elasticsearch.client.Client;
  import org.elasticsearch.client.Requests;
+ import org.elasticsearch.client.transport.NoNodeAvailableException;
  import org.elasticsearch.client.transport.TransportClient;
  import org.elasticsearch.cluster.metadata.AliasMetaData;
  import org.elasticsearch.cluster.metadata.AliasOrIndex;
@@ -43,6 +44,7 @@ import org.embulk.config.ConfigSource;
  import org.embulk.config.Task;
  import org.embulk.config.TaskReport;
  import org.embulk.config.TaskSource;
+ import org.embulk.config.UserDataException;
  import org.embulk.spi.Column;
  import org.embulk.spi.ColumnVisitor;
  import org.embulk.spi.Exec;
@@ -143,6 +145,16 @@ public class ElasticsearchOutputPlugin
  }
  log.info(String.format("Inserting data into index[%s]", task.getIndex()));
  control.run(task.dump());
+
+ if (task.getMode().equals(Mode.REPLACE)) {
+ try {
+ reAssignAlias(task.getAlias().orNull(), task.getIndex(), client);
+ } catch (IndexNotFoundException | InvalidAliasNameException e) {
+ throw new ConfigException(e);
+ } catch (NoNodeAvailableException e) {
+ throw new ConnectionException(e);
+ }
+ }
  } catch (Exception e) {
  throw Throwables.propagate(e);
  }
@@ -164,16 +176,7 @@ public class ElasticsearchOutputPlugin
  public void cleanup(TaskSource taskSource,
  Schema schema, int processorCount,
  List<TaskReport> successTaskReports)
- {
- final PluginTask task = taskSource.loadTask(PluginTask.class);
- if (task.getMode().equals(Mode.REPLACE)) {
- try (Client client = createClient(task)) {
- reAssignAlias(task.getAlias().orNull(), task.getIndex(), client);
- } catch (IndexNotFoundException | InvalidAliasNameException e) {
- throw new ConfigException(e);
- }
- }
- }
+ {}
 
  private Client createClient(final PluginTask task)
  {
@@ -186,8 +189,8 @@ public class ElasticsearchOutputPlugin
  for (NodeAddressTask node : nodes) {
  try {
  client.addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName(node.getHost()), node.getPort()));
- } catch (UnknownHostException e) {
- Throwables.propagate(e);
+ } catch (UnknownHostException | NoNodeAvailableException e) {
+ throw new ConnectionException(e);
  }
  }
  return client;
@@ -226,7 +229,12 @@ public class ElasticsearchOutputPlugin
  @Override
  public void afterBulk(long executionId, BulkRequest request, Throwable failure)
  {
- log.warn("Got the error during bulk processing", failure);
+ if (failure.getClass() == NoNodeAvailableException.class) {
+ log.error("Got the error during bulk processing", failure);
+ throw new ConnectionException(failure);
+ } else {
+ log.warn("Got the error during bulk processing", failure);
+ }
  }
  }).setBulkActions(task.getBulkActions())
  .setBulkSize(new ByteSizeValue(task.getBulkSize()))
@@ -376,7 +384,7 @@ public class ElasticsearchOutputPlugin
  contextBuilder.endObject();
  bulkProcessor.add(newIndexRequest(getIdValue(idColumn)).source(contextBuilder));
 
- } catch (IOException e) {
+ } catch (ConnectionException | IOException e) {
  Throwables.propagate(e); // TODO error handling
  }
  }
@@ -543,4 +551,17 @@ public class ElasticsearchOutputPlugin
  Timestamp time = Exec.getTransactionTime();
  return indexName + new SimpleDateFormat("_yyyyMMdd-HHmmss").format(time.toEpochMilli());
  }
+
+ public class ConnectionException extends RuntimeException implements UserDataException
+ {
+ protected ConnectionException()
+ {
+ }
+
+ public ConnectionException(Throwable cause)
+ {
+ super(cause);
+ }
+ }
+
  }
@@ -168,12 +168,11 @@ public class TestElasticsearchOutputPlugin
  }
 
  @Test
- public void testResume()
+ public void testTransaction()
  {
  ConfigSource config = config();
  Schema schema = config.getNested("parser").loadConfig(CsvParserPlugin.PluginTask.class).getSchemaConfig().toSchema();
- PluginTask task = config.loadConfig(PluginTask.class);
- plugin.resume(task.dump(), schema, 0, new OutputPlugin.Control()
+ plugin.transaction(config, schema, 0, new OutputPlugin.Control()
  {
  @Override
  public List<TaskReport> run(TaskSource taskSource)
@@ -181,27 +180,16 @@ public class TestElasticsearchOutputPlugin
  return Lists.newArrayList(Exec.newTaskReport());
  }
  });
+ // no error happens
  }
 
  @Test
- public void testTransaction()
+ public void testResume()
  {
- ConfigSource config = Exec.newConfigSource()
- .set("in", inputConfig())
- .set("parser", parserConfig(schemaConfig()))
- .set("type", "elasticsearch")
- .set("mode", "replace")
- .set("nodes", ES_NODES)
- .set("cluster_name", ES_CLUSTER_NAME)
- .set("index", ES_INDEX)
- .set("index_type", ES_INDEX_TYPE)
- .set("id", ES_ID)
- .set("bulk_actions", ES_BULK_ACTIONS)
- .set("bulk_size", ES_BULK_SIZE)
- .set("concurrent_requests", ES_CONCURRENT_REQUESTS
- );
+ ConfigSource config = config();
  Schema schema = config.getNested("parser").loadConfig(CsvParserPlugin.PluginTask.class).getSchemaConfig().toSchema();
- plugin.transaction(config, schema, 0, new OutputPlugin.Control()
+ PluginTask task = config.loadConfig(PluginTask.class);
+ plugin.resume(task.dump(), schema, 0, new OutputPlugin.Control()
  {
  @Override
  public List<TaskReport> run(TaskSource taskSource)
@@ -209,6 +197,15 @@ public class TestElasticsearchOutputPlugin
  return Lists.newArrayList(Exec.newTaskReport());
  }
  });
+ }
+
+ @Test
+ public void testCleanup()
+ {
+ ConfigSource config = config();
+ Schema schema = config.getNested("parser").loadConfig(CsvParserPlugin.PluginTask.class).getSchemaConfig().toSchema();
+ PluginTask task = config.loadConfig(PluginTask.class);
+ plugin.cleanup(task.dump(), schema, 0, Arrays.asList(Exec.newTaskReport()));
  // no error happens
  }
 
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: embulk-output-elasticsearch
  version: !ruby/object:Gem::Version
- version: 0.2.0
+ version: 0.2.1
  platform: ruby
  authors:
  - Muga Nishizawa
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2016-01-26 00:00:00.000000000 Z
+ date: 2016-02-05 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  requirement: !ruby/object:Gem::Requirement
@@ -75,7 +75,7 @@ files:
  - classpath/commons-cli-1.3.1.jar
  - classpath/compress-lzf-1.0.2.jar
  - classpath/elasticsearch-2.0.0.jar
- - classpath/embulk-output-elasticsearch-0.2.0.jar
+ - classpath/embulk-output-elasticsearch-0.2.1.jar
  - classpath/HdrHistogram-2.1.6.jar
  - classpath/hppc-0.7.1.jar
  - classpath/jackson-dataformat-cbor-2.5.3.jar