@terascope/elasticsearch-api 4.12.3 → 4.13.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/src/index.d.ts +70 -0
- package/dist/src/index.d.ts.map +1 -0
- package/{index.js → dist/src/index.js} +334 -550
- package/dist/src/index.js.map +1 -0
- package/package.json +13 -3
- package/jest.config.js +0 -8
- package/test/api-spec.js +0 -1265
- package/test/bulk-send-dlq-spec.js +0 -96
- package/test/bulk-send-limit-spec.js +0 -49
- package/test/retry-spec.js +0 -36
- package/types/index.d.ts +0 -81
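The renamed entry point (package/index.js → package/dist/src/index.js) keeps the same default-export factory that appears throughout the diff below, elasticsearchApi(client, logger, _opConfig), and many of its wrapper functions (count, search, bulkSend, indexCreate, ...) are now declared async, so callers still receive promises. A minimal consumer sketch, assuming an already-constructed client and logger (both hypothetical here, as is the index name — none of that is part of this release):

import elasticsearchApi from '@terascope/elasticsearch-api';

// Assumed to already exist; constructing them is outside the scope of this diff.
declare const client: any; // an elasticsearch-js / opensearch client instance
declare const logger: any; // any logger exposing trace/debug/info/warn/error

const api = elasticsearchApi(client, logger, { index: 'example-index', full_response: false });

async function run() {
    // count() and search() are async wrappers around the client, as shown in the diff body.
    const total = await api.count({ index: 'example-index', body: { query: { match_all: {} } } });
    const docs = await api.search({ index: 'example-index', q: '*', size: 10 });
    return { total, docs };
}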
@@ -1,21 +1,9 @@
-
-
-
-import {
-    isTest, TSError, isFatalError,
-    parseError, getBackoffDelay, isRetryableError,
-    get, toNumber, isString, isSimpleObject,
-    castArray, flatten, toBoolean,
-    uniq, random, cloneDeep, DataEntity,
-    isDeepEqual, getTypeOf, isProd
-} from '@terascope/utils';
-import { ElasticsearchDistribution } from '@terascope/types';
-
+import { isTest, TSError, isFatalError, parseError, getBackoffDelay, isRetryableError, get, toNumber, isString, isSimpleObject, castArray, flatten, toBoolean, uniq, random, cloneDeep, DataEntity, isDeepEqual, getTypeOf, isProd } from '@terascope/utils';
+import { ElasticsearchDistribution, } from '@terascope/types';
+// @ts-expect-error TODO: do we still need this after getting rid of es6?
 import('setimmediate');
-
 const DOCUMENT_EXISTS = 409;
 const TOO_MANY_REQUESTS = 429;
-
 // Module to manage persistence in Elasticsearch.
 // All functions in this module return promises that must be resolved to get the final result.
 export default function elasticsearchApi(client, logger, _opConfig) {
@@ -26,26 +14,19 @@ export default function elasticsearchApi(client, logger, _opConfig) {
     if (!logger) {
         throw new Error('Elasticsearch API requires logger');
     }
-
-    const warning = _warn(
-        logger,
-        'The elasticsearch cluster queues are overloaded, resubmitting failed queries from bulk'
-    );
-
+    const warning = _warn(logger, 'The elasticsearch cluster queues are overloaded, resubmitting failed queries from bulk');
     const retryStart = get(client, '__testing.start', 5000);
     const retryLimit = get(client, '__testing.limit', 10000);
-
     const { connection = 'unknown' } = config;
-
     async function count(query) {
-
-
-
+        const countQuery = {
+            ...query,
+            size: 0
+        };
+        const response = await _searchES(countQuery);
         const data = get(response, 'hits.total.value', get(response, 'hits.total'));
-
         return data;
     }
-
     function convertDocToDataEntity(doc) {
         const now = Date.now();
         const metadata = {
@@ -64,137 +45,119 @@ export default function elasticsearchApi(client, logger, _opConfig) {
         };
         return DataEntity.make(doc._source, metadata);
     }
-
-
-
-
-
-
-        } = query;
-
+    async function search(query) {
+        const {
+        // @ts-expect-error this can be removed when es6 is not supported
+        _sourceInclude, _source_includes: oldSourIncludes,
+        // @ts-expect-error
+        _sourceExclude, _source_excludes: oldSourExcludes, ...safeQuery } = query;
         const sourceIncludes = _sourceInclude || oldSourIncludes;
         const sourceExcludes = _sourceExclude || oldSourExcludes;
-
         if (sourceIncludes) {
+            // @ts-expect-error
             safeQuery._source_includes = sourceIncludes;
         }
-
         if (sourceExcludes) {
+            // @ts-expect-error
             safeQuery._source_excludes = sourceExcludes;
         }
-
-
-
-
-
-
-
-        });
+        const data = await _searchES(safeQuery);
+        if (config.full_response) {
+            return data;
+        }
+        if (!data.hits.hits)
+            return [];
+        return data.hits.hits.map(convertDocToDataEntity);
     }
-
     function _makeRequest(clientBase, endpoint, query, fnNamePrefix) {
         return new Promise((resolve, reject) => {
             const fnName = `${fnNamePrefix || ''}->${endpoint}()`;
             const errHandler = _errorHandler(_runRequest, query, reject, fnName);
-
             function _runRequest() {
                 clientBase[endpoint](query)
                     .then((rawResponse) => {
-
-
-
+                    const response = get(rawResponse, 'body', rawResponse);
+                    resolve(response);
+                })
                     .catch(errHandler);
             }
-
             waitForClient(() => _runRequest(), reject);
         });
     }
-
-    function _clientRequest(endpoint, query) {
+    async function _clientRequest(endpoint, query) {
         return _makeRequest(client, endpoint, query);
     }
-
-    function _clientIndicesRequest(endpoint, query) {
+    async function _clientIndicesRequest(endpoint, query) {
         return _makeRequest(client.indices, endpoint, query, 'indices');
     }
-
-
-
-
-
-
-
-            .map(convertDocToDataEntity);
-    });
+    async function mget(query, fullResponse = false) {
+        const results = await _clientRequest('mget', _adjustTypeForEs7(query));
+        if (fullResponse)
+            return results;
+        return results.docs
+            .filter((doc) => doc.found)
+            .map(convertDocToDataEntity);
     }
-
-    function getFn(query, fullResponse = false) {
+    async function getFn(query, fullResponse = false) {
         if (fullResponse) {
             return _clientRequest('get', query);
         }
-
-
+        const records = await _clientRequest('get', query);
+        return convertDocToDataEntity(records);
     }
-
-    function indexFn(query) {
+    async function indexFn(query) {
         return _clientRequest('index', _adjustTypeForEs7(query));
     }
-
-    function indexWithId(query) {
+    async function indexWithId(query) {
         return _clientRequest('index', _adjustTypeForEs7(query)).then(() => query.body);
     }
-
-    function create(query) {
+    async function create(query) {
         return _clientRequest('create', _adjustTypeForEs7(query)).then(() => query.body);
     }
-
-
-
+    async function update(query) {
+        // TODO this does not seem right
+        await _clientRequest('update', _adjustTypeForEs7(query));
+        // @ts-expect-error
+        return query.body.doc;
     }
-
-
-        return
+    async function remove(query) {
+        const result = await _clientRequest('delete', _adjustTypeForEs7(query));
+        return result.found;
     }
-
-    function indexExists(query) {
+    async function indexExists(query) {
         return _clientIndicesRequest('exists', query);
     }
-
-    function indexCreate(query) {
+    async function indexCreate(query) {
         const params = _fixMappingRequest(query, false);
         return _clientIndicesRequest('create', params);
     }
-
-    function indexRefresh(query) {
+    async function indexRefresh(query) {
         return _clientIndicesRequest('refresh', query);
     }
-
-    function indexRecovery(query) {
+    async function indexRecovery(query) {
         return _clientIndicesRequest('recovery', query);
     }
-
-    function nodeInfo() {
+    async function nodeInfo() {
         return client.nodes.info();
     }
-
-    function nodeStats() {
+    async function nodeStats() {
         return client.nodes.stats();
     }
-
     function _verifyIndex(indexObj, name) {
         let wasFound = false;
         const results = [];
         const regex = RegExp(name);
-
         // exact match of index
         if (indexObj[name]) {
             wasFound = true;
             if (indexObj[name].settings.index.max_result_window) {
                 results.push({ name, windowSize: indexObj[name].settings.index.max_result_window });
-            }
+            }
+            else {
                 results.push({ name, windowSize: 10000 });
             }
-        }
+        }
+        else {
             // check to see if regex picks up indices
             Object.entries(indexObj).forEach(([key, value]) => {
                 if (key.match(regex) !== null) {
@@ -204,18 +167,18 @@ export default function elasticsearchApi(client, logger, _opConfig) {
                         name: key,
                         windowSize: value.settings.index.max_result_window,
                     });
-                }
+                }
+                else {
                     results.push({ name: key, windowSize: 10000 });
                 }
             }
         });
     }
-
         return { found: wasFound, indexWindowSize: results };
     }
-
-    function version() {
+    async function version() {
         const wildCardRegex = /\*/g;
+        // @ts-expect-error this expects index to exist
         const isWildCardRegexSearch = config.index.match(wildCardRegex);
         // We cannot reliable search index queries with wildcards
         // for existence or max_result_window, it could be
@@ -224,43 +187,33 @@ export default function elasticsearchApi(client, logger, _opConfig) {
         // A regular regex query will not error, it will just return
         // no results which is not always an error
         if (isWildCardRegexSearch !== null) {
-            logger.warn(
-                `Running a regex or cross cluster search for ${config.index}, there is no reliable way to verify index and max_result_window`
-            );
+            logger.warn(`Running a regex or cross cluster search for ${config.index}, there is no reliable way to verify index and max_result_window`);
             return Promise.resolve(true);
         }
-
         return client.indices
             .getSettings({})
             .then((results) => {
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            return Promise.resolve();
-        })
+            const settingsData = results.body && results.meta ? results.body : results;
+            const resultIndex = _verifyIndex(settingsData, config.index);
+            if (resultIndex.found) {
+                resultIndex.indexWindowSize.forEach((ind) => {
+                    logger.warn(`max_result_window for index: ${ind.name} is set at ${ind.windowSize}. On very large indices it is possible that a slice can not be divided to stay below this limit. If that occurs an error will be thrown by Elasticsearch and the slice can not be processed. Increasing max_result_window in the Elasticsearch index settings will resolve the problem.`);
+                });
+            }
+            else {
+                const error = new TSError('index specified in reader does not exist', {
+                    statusCode: 404,
+                });
+                return Promise.reject(error);
+            }
+            return Promise.resolve();
+        })
             .catch((err) => Promise.reject(new TSError(err)));
     }
-
-    function putTemplate(template, name) {
+    async function putTemplate(template, name) {
         const params = _fixMappingRequest({ body: template, name }, true);
-        return _clientIndicesRequest('putTemplate', params)
-            (results) => results
-        );
+        return _clientIndicesRequest('putTemplate', params);
     }
-
     /**
      * When the bulk request has errors this will find the actions
      * records to retry.
@@ -275,11 +228,9 @@ export default function elasticsearchApi(client, logger, _opConfig) {
     function _filterRetryRecords(actionRecords, result) {
         const retry = [];
         const { items } = result;
-
         let nonRetriableError = false;
         let reason = '';
         let successful = 0;
-
         for (let i = 0; i < items.length; i++) {
             // key could either be create or delete etc, just want the actual data at the value spot
             const item = Object.values(items[i])[0];
@@ -289,7 +240,6 @@ export default function elasticsearchApi(client, logger, _opConfig) {
             if (item.status === DOCUMENT_EXISTS) {
                 continue;
             }
-
             if (item.status === TOO_MANY_REQUESTS || item.error.type === 'es_rejected_execution_exception') {
                 if (actionRecords[i] == null) {
                     // this error should not happen in production,
@@ -299,46 +249,42 @@ export default function elasticsearchApi(client, logger, _opConfig) {
                 // the index in the item list will match the index in the
                 // input records
                 retry.push(actionRecords[i]);
-            }
-
-                && item.error.type !== 'document_missing_exception'
-            ) {
+            }
+            else if (item.error.type !== 'document_already_exists_exception'
+                && item.error.type !== 'document_missing_exception') {
                 nonRetriableError = true;
                 reason = `${item.error.type}--${item.error.reason}`;
-
                 if (config._dead_letter_action === 'kafka_dead_letter') {
+                    // @ts-expect-error
                     actionRecords[i].data.setMetadata('_bulk_sender_rejection', reason);
                     continue;
                 }
-
                 break;
             }
-            }
+            }
+            else if (item.status == null || item.status < 400) {
                 successful++;
             }
         }
-
         if (nonRetriableError) {
             // if dlq active still attempt the retries
             const retryOnError = config._dead_letter_action === 'kafka_dead_letter' ? retry : [];
-
             return {
                 retry: retryOnError, successful, error: true, reason
             };
         }
-
         return { retry, successful, error: false };
     }
-
     function getFirstKey(obj) {
         return Object.keys(obj)[0];
     }
-
     /**
      * @param data {Array<{ action: data }>}
      * @returns {Promise<number>}
      */
-    async function _bulkSend(actionRecords, previousCount = 0, previousRetryDelay = 0
+    async function _bulkSend(actionRecords, previousCount = 0, previousRetryDelay = 0
+    // TODO: why are we returning a number?
+    ) {
         const body = actionRecords.flatMap((record, index) => {
             if (record.action == null) {
                 let dbg = '';
@@ -347,74 +293,60 @@ export default function elasticsearchApi(client, logger, _opConfig) {
             }
             throw new Error(`Bulk send record is missing the action property${dbg}`);
         }
-
         if (!isElasticsearch6()) {
             const actionKey = getFirstKey(record.action);
             const { _type, ...withoutTypeAction } = record.action[actionKey];
             // if data is specified return both
             return record.data
                 ? [{
-
-
-
-
+                    ...record.action,
+                    [actionKey]: withoutTypeAction
+                },
+                record.data]
                 : [{
-
-
-
+                    ...record.action,
+                    [actionKey]: withoutTypeAction
+                }];
         }
-
         // if data is specified return both
         return record.data ? [record.action, record.data] : [record.action];
     });
-
     const response = await _clientRequest('bulk', { body });
     const results = response.body ? response.body : response;
-
     if (!results.errors) {
         return results.items.reduce((c, item) => {
             const [value] = Object.values(item);
             // ignore non-successful status codes
-            if (value.status != null && value.status >= 400)
+            if (value.status != null && value.status >= 400)
+                return c;
             return c + 1;
         }, 0);
     }
-
-    const {
-        retry, successful, error, reason
-    } = _filterRetryRecords(actionRecords, results);
-
+    const { retry, successful, error, reason } = _filterRetryRecords(actionRecords, results);
     if (error && config._dead_letter_action !== 'kafka_dead_letter') {
         throw new Error(`bulk send error: ${reason}`);
     }
-
     if (retry.length === 0) {
         return previousCount + successful;
     }
-
     return _handleRetries(retry, previousCount + successful, previousRetryDelay);
     }
-
     async function _handleRetries(retry, affectedCount, previousRetryDelay) {
         warning();
-
         const nextRetryDelay = await _awaitRetry(previousRetryDelay);
         return _bulkSend(retry, affectedCount, nextRetryDelay);
     }
-
     /**
      * The new and improved bulk send with proper retry support
      *
     * @returns {Promise<number>} the number of affected rows
     */
-    function bulkSend(data) {
+    async function bulkSend(data) {
        if (!Array.isArray(data)) {
            throw new Error(`Expected bulkSend to receive an array, got ${data} (${getTypeOf(data)})`);
        }
-
-        return Promise.resolve(_bulkSend(data));
+        return _bulkSend(data);
    }
-
    function _warn(warnLogger, msg) {
        let _lastTime = null;
        return () => {
@@ -429,81 +361,45 @@ export default function elasticsearchApi(client, logger, _opConfig) {
             warnLogger.warn(msg);
         };
     }
-
     function validateGeoParameters(opConfig) {
-        const {
-            geo_field: geoField,
-            geo_box_top_left: geoBoxTopLeft,
-            geo_box_bottom_right: geoBoxBottomRight,
-            geo_point: geoPoint,
-            geo_distance: geoDistance,
-            geo_sort_point: geoSortPoint,
-            geo_sort_order: geoSortOrder,
-            geo_sort_unit: geoSortUnit,
-        } = opConfig;
-
+        const { geo_field: geoField, geo_box_top_left: geoBoxTopLeft, geo_box_bottom_right: geoBoxBottomRight, geo_point: geoPoint, geo_distance: geoDistance, geo_sort_point: geoSortPoint, geo_sort_order: geoSortOrder, geo_sort_unit: geoSortUnit, } = opConfig;
         function isBoundingBoxQuery() {
             return geoBoxTopLeft && geoBoxBottomRight;
         }
-
         function isGeoDistanceQuery() {
             return geoPoint && geoDistance;
         }
-
         if (geoBoxTopLeft && geoPoint) {
             throw new Error('geo_box and geo_distance queries can not be combined.');
         }
-
         if ((geoPoint && !geoDistance) || (!geoPoint && geoDistance)) {
-            throw new Error(
-                'Both geo_point and geo_distance must be provided for a geo_point query.'
-            );
+            throw new Error('Both geo_point and geo_distance must be provided for a geo_point query.');
         }
-
         if ((geoBoxTopLeft && !geoBoxBottomRight) || (!geoBoxTopLeft && geoBoxBottomRight)) {
-            throw new Error(
-                'Both geo_box_top_left and geo_box_bottom_right must be provided for a geo bounding box query.'
-            );
+            throw new Error('Both geo_box_top_left and geo_box_bottom_right must be provided for a geo bounding box query.');
         }
-
         if (geoBoxTopLeft && (geoSortOrder || geoSortUnit) && !geoSortPoint) {
-            throw new Error(
-                'bounding box search requires geo_sort_point to be set if any other geo_sort_* parameter is provided'
-            );
+            throw new Error('bounding box search requires geo_sort_point to be set if any other geo_sort_* parameter is provided');
         }
-
         if ((geoBoxTopLeft || geoPoint || geoDistance || geoSortPoint) && !geoField) {
-            throw new Error(
-                'geo box search requires geo_field to be set if any other geo query parameters are provided'
-            );
+            throw new Error('geo box search requires geo_field to be set if any other geo query parameters are provided');
         }
-
         if (geoField && !(isBoundingBoxQuery() || isGeoDistanceQuery())) {
-            throw new Error(
-                'if geo_field is specified then the appropriate geo_box or geo_distance query parameters need to be provided as well'
-            );
+            throw new Error('if geo_field is specified then the appropriate geo_box or geo_distance query parameters need to be provided as well');
         }
     }
-
+    // TODO: might need to relocate the elasticsearch op type somewhere to be shared here
     function geoSearch(opConfig) {
         let isGeoSort = false;
+        // TODO: needs a more distinct type for geo search components
         const queryResults = {};
         // check for key existence to see if they are user defined
         if (opConfig.geo_sort_order || opConfig.geo_sort_unit || opConfig.geo_sort_point) {
             isGeoSort = true;
         }
-
-        const {
-            geo_box_top_left: geoBoxTopLeft,
-            geo_box_bottom_right: geoBoxBottomRight,
-            geo_point: geoPoint,
-            geo_distance: geoDistance,
-            geo_sort_point: geoSortPoint,
-            geo_sort_order: geoSortOrder = 'asc',
-            geo_sort_unit: geoSortUnit = 'm',
-        } = opConfig;
-
+        const { geo_box_top_left: geoBoxTopLeft, geo_box_bottom_right: geoBoxBottomRight, geo_point: geoPoint, geo_distance: geoDistance, geo_sort_point: geoSortPoint, geo_sort_order: geoSortOrder = 'asc', geo_sort_unit: geoSortUnit = 'm', } = opConfig;
         function createGeoSortQuery(location) {
+            // TODO: this needs a better type
             const sortedSearch = { _geo_distance: {} };
             sortedSearch._geo_distance[opConfig.geo_field] = {
                 lat: location[0],
@@ -513,22 +409,17 @@ export default function elasticsearchApi(client, logger, _opConfig) {
             sortedSearch._geo_distance.unit = geoSortUnit;
             return sortedSearch;
         }
-
         let parsedGeoSortPoint;
-
         if (geoSortPoint) {
             parsedGeoSortPoint = createGeoPoint(geoSortPoint);
         }
-
         // Handle an Geo Bounding Box query
         if (geoBoxTopLeft) {
             const topLeft = createGeoPoint(geoBoxTopLeft);
             const bottomRight = createGeoPoint(geoBoxBottomRight);
-
             const searchQuery = {
                 geo_bounding_box: {},
             };
-
             searchQuery.geo_bounding_box[opConfig.geo_field] = {
                 top_left: {
                     lat: topLeft[0],
@@ -539,16 +430,13 @@ export default function elasticsearchApi(client, logger, _opConfig) {
                     lon: bottomRight[1],
                 },
             };
-
             queryResults.query = searchQuery;
-
-            if (isGeoSort) {
+            // are these two tied together?
+            if (isGeoSort && parsedGeoSortPoint) {
                 queryResults.sort = createGeoSortQuery(parsedGeoSortPoint);
             }
-
             return queryResults;
         }
-
         if (geoDistance) {
             const location = createGeoPoint(geoPoint);
             const searchQuery = {
@@ -556,25 +444,22 @@ export default function elasticsearchApi(client, logger, _opConfig) {
                     distance: geoDistance,
                 },
             };
-
             searchQuery.geo_distance[opConfig.geo_field] = {
                 lat: location[0],
                 lon: location[1],
             };
-
             queryResults.query = searchQuery;
             const locationPoints = parsedGeoSortPoint || location;
+            // TODO: need better geo parsing
             queryResults.sort = createGeoSortQuery(locationPoints);
         }
-
         return queryResults;
     }
-
     function createGeoPoint(point) {
         const pieces = point.split(',');
         return pieces;
     }
-
+    // TODO: need type contract between this and elasticsearch assets
     function _buildRangeQuery(opConfig, msg) {
         const body = {
             query: {
@@ -591,7 +476,6 @@ export default function elasticsearchApi(client, logger, _opConfig) {
                 gte: msg.start,
                 lt: msg.end,
             };
-
             body.query.bool.must.push({ range: dateObj });
         }
         // TODO: deprecate this logic and remove in the future
@@ -599,12 +483,10 @@ export default function elasticsearchApi(client, logger, _opConfig) {
         if (msg.key) {
             body.query.bool.must.push({ wildcard: { _uid: msg.key } });
         }
-
         if (msg.wildcard) {
             const { field, value } = msg.wildcard;
             body.query.bool.must.push({ wildcard: { [field]: value } });
         }
-
         // elasticsearch lucene based query
         if (opConfig.query) {
             body.query.bool.must.push({
@@ -613,76 +495,63 @@ export default function elasticsearchApi(client, logger, _opConfig) {
                 },
             });
         }
-
         if (opConfig.geo_field) {
             validateGeoParameters(opConfig);
             const geoQuery = geoSearch(opConfig);
             body.query.bool.must.push(geoQuery.query);
-            if (geoQuery.sort)
+            if (geoQuery.sort)
+                body.sort = [geoQuery.sort];
         }
         return body;
     }
-
     function buildQuery(opConfig, msg) {
         const query = {
             index: opConfig.index,
             size: msg.count,
             body: _buildRangeQuery(opConfig, msg),
         };
-
         if (opConfig.fields) {
             query._source = opConfig.fields;
         }
         return query;
     }
-
-    function _searchES(query) {
+    async function _searchES(query) {
         return new Promise((resolve, reject) => {
             const errHandler = _errorHandler(_performSearch, query, reject, '->search()');
             const retry = _retryFn(_performSearch, query, reject);
-
             function _performSearch(queryParam) {
                 client
                     .search(queryParam)
                     .then((data) => {
-
-
-
-
-
-
-
-
-
-
-
-                        const
-
-
-
-
-
-
-                    )
-
-
-
-
-
-                    },
-                });
-                reject(error);
-            } else {
-                retry();
-            }
-        })
+                    const failuresReasons = [];
+                    const results = data.body ? data.body : data;
+                    const { failures, failed } = results._shards;
+                    if (!failed) {
+                        resolve(results);
+                        return;
+                    }
+                    failuresReasons.push(...failures);
+                    const reasons = uniq(flatten(failuresReasons.map((shard) => shard.reason.type)));
+                    if (reasons.length > 1
+                        || reasons[0] !== 'es_rejected_execution_exception') {
+                        const errorReason = reasons.join(' | ');
+                        const error = new TSError(errorReason, {
+                            reason: 'Not all shards returned successful',
+                            context: {
+                                connection,
+                            },
+                        });
+                        reject(error);
+                    }
+                    else {
+                        retry();
+                    }
+                })
                     .catch(errHandler);
             }
-
             waitForClient(() => _performSearch(query), reject);
         });
     }
-
     function _adjustTypeForEs7(query) {
         if (!isElasticsearch6()) {
             if (Array.isArray(query)) {
@@ -690,18 +559,15 @@ export default function elasticsearchApi(client, logger, _opConfig) {
             }
             delete query.type;
         }
-
         return query;
     }
-
     function _removeTypeFromBulkRequest(query) {
-        if (isElasticsearch6())
-
+        if (isElasticsearch6())
+            return query;
         return query.map((queryItem) => {
             if (isSimpleObject(queryItem)) {
                 // get the metadata and ignore the record
                 const bulkMetaData = _getBulkMetaData(queryItem);
-
                 if (_hasBulkMetaDataProps(bulkMetaData)) {
                     delete bulkMetaData._type;
                 }
@@ -709,7 +575,7 @@ export default function elasticsearchApi(client, logger, _opConfig) {
             return queryItem;
         });
     }
-
+    // TODO: this is not even being called in bulk calls
     function _getBulkMetaData(queryItem) {
         // bulk actions are index, create, delete, and update
         return queryItem.index
@@ -717,7 +583,6 @@ export default function elasticsearchApi(client, logger, _opConfig) {
             || queryItem.delete
             || queryItem.update;
     }
-
     function _hasBulkMetaDataProps(bulkMetaData) {
         return bulkMetaData
             && isSimpleObject(bulkMetaData)
@@ -725,61 +590,57 @@ export default function elasticsearchApi(client, logger, _opConfig) {
             && '_id' in bulkMetaData
             && '_type' in bulkMetaData;
     }
-
     /**
      * Wait for the client to be available before resolving,
      * this will also naturally stagger many in-flight requests
      *
      * - reject if the connection is closed
      * - resolve after timeout to let the underlying client deal with any problems
+     * TODO: this is not ok, relying on esoteric promise behavior and being checked on every call
      */
-    function waitForClient(resolve, reject) {
+    async function waitForClient(resolve, reject) {
+        // @ts-expect-error
         let intervalId = null;
         const startTime = Date.now();
-
         // set different values for when process.env.NODE_ENV === test
         const timeoutMs = isTest ? 1000 : random(5000, 15000);
         const intervalMs = isTest ? 50 : 100;
-
         // avoiding setting the interval if we don't need to
         if (_checkClient()) {
             return;
         }
-
         intervalId = setInterval(_checkClient, intervalMs);
-
         function _checkClient() {
             const elapsed = Date.now() - startTime;
             try {
                 const valid = verifyClient();
-                if (!valid && elapsed <= timeoutMs)
-
+                if (!valid && elapsed <= timeoutMs)
+                    return false;
+                // @ts-expect-error
                 clearInterval(intervalId);
                 resolve(elapsed);
                 return true;
-            }
+            }
+            catch (err) {
+                // @ts-expect-error
                 clearInterval(intervalId);
                 reject(err);
                 return true;
            }
        }
    }
-
    function _retryFn(fn, data, reject) {
        let delay = 0;
-
        return (_data) => {
            const args = _data || data;
-
            _awaitRetry(delay)
                .then((newDelay) => {
-
-
-
+                delay = newDelay;
+                fn(args);
+            })
                .catch(reject);
        };
    }
-
    /**
     * @returns {Promise<number>} the delayed time
     */
@@ -787,109 +648,95 @@ export default function elasticsearchApi(client, logger, _opConfig) {
         return new Promise((resolve, reject) => {
             waitForClient((elapsed) => {
                 const delay = getBackoffDelay(previousDelay, 2, retryLimit, retryStart);
-
                 let timeoutMs = delay - elapsed;
-                if (timeoutMs < 1)
-
+                if (timeoutMs < 1)
+                    timeoutMs = 1;
+                // TODO: this is super confusing
                 setTimeout(resolve, timeoutMs, delay);
             }, reject);
         });
     }
-
     function isConnectionErrorMessage(err) {
         const msg = get(err, 'message', '');
         return msg.includes('No Living connections') || msg.includes('ECONNREFUSED');
     }
-
     function isErrorRetryable(err) {
         const checkErrorMsg = isRetryableError(err);
-
         if (checkErrorMsg) {
             return true;
         }
-
         const isRejectedError = get(err, 'body.error.type') === 'es_rejected_execution_exception';
         const isConnectionError = isConnectionErrorMessage(err);
-
         if (isRejectedError || isConnectionError) {
             return true;
         }
-
         return false;
     }
-
     function _errorHandler(fn, data, reject, fnName = '->unknown()') {
         const retry = _retryFn(fn, data, reject);
-
         return function _errorHandlerFn(err) {
             const retryable = isErrorRetryable(err);
-
             if (retryable) {
                 retry();
-            }
-
-
-
-
-
-
-
-                );
+            }
+            else {
+                reject(new TSError(err, {
+                    context: {
+                        fnName,
+                        connection,
+                    },
+                }));
             }
         };
     }
-
-    function isAvailable(index, recordType) {
+    async function isAvailable(index, recordType) {
         const query = {
             index,
             q: '',
             size: 0,
+            // @ts-expect-error TODO: should be a number, check what its really doing
             terminate_after: '1',
         };
-
         const label = recordType ? `for ${recordType}` : index;
-
         return new Promise((resolve, reject) => {
             client
                 .search(query)
                 .then((results) => {
-
-
-
+                logger.trace(`index ${label} is now available`);
+                resolve(results);
+            })
                 .catch(() => {
-
-
-
-
-
-
-
-
-
-                    return;
-                }
-            } catch (err) {
-                running = false;
-                clearInterval(checkInterval);
-                reject(err);
+                let running = false;
+                const checkInterval = setInterval(() => {
+                    if (running)
+                        return;
+                    running = true;
+                    try {
+                        const valid = verifyClient();
+                        if (!valid) {
+                            logger.debug(`index ${label} is in an invalid state`);
                             return;
                         }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                    }
+                    catch (err) {
+                        running = false;
+                        clearInterval(checkInterval);
+                        reject(err);
+                        return;
+                    }
+                    client
+                        .search(query)
+                        .then((results) => {
+                        running = false;
+                        clearInterval(checkInterval);
+                        resolve(results);
+                    })
+                        .catch(() => {
+                        running = false;
+                        logger.warn(`verifying index ${label} is open`);
+                    });
+                }, 200);
+            });
         });
     }
     // TODO: verifyClient needs to be checked with new client usage
@@ -906,14 +753,12 @@ export default function elasticsearchApi(client, logger, _opConfig) {
                 fatalError: true
             });
         }
-
         const alive = get(client, 'transport.connectionPool._conns.alive');
         // so we don't break existing tests with mocked clients, we will default to 1
         const aliveCount = alive && Array.isArray(alive) ? alive.length : 1;
         if (!aliveCount) {
             return false;
         }
-
         return true;
     }
     /** This is deprecated as an external api,
@@ -921,29 +766,23 @@ export default function elasticsearchApi(client, logger, _opConfig) {
      * */
     function getESVersion() {
         const newClientVersion = get(client, '__meta.majorVersion');
-
-
-
+        if (newClientVersion)
+            return newClientVersion;
         // legacy
         const esVersion = get(client, 'transport._config.apiVersion', '6.5');
-
         if (esVersion && isString(esVersion)) {
             const [majorVersion] = esVersion.split('.');
             return toNumber(majorVersion);
         }
-
         return 6;
     }
-
     function getClientMetadata() {
         if (client.__meta) {
             return client.__meta;
         }
-
         const esVersion = get(client, 'transport._config.apiVersion', '6.5');
         const distribution = ElasticsearchDistribution.elasticsearch;
         const [majorVersion = 6, minorVersion = 5] = esVersion.split('.').map(toNumber);
-
         return {
             distribution,
             version: esVersion,
@@ -951,46 +790,40 @@ export default function elasticsearchApi(client, logger, _opConfig) {
             minorVersion
         };
     }
-
     function isElasticsearch6() {
         const { distribution, majorVersion } = getClientMetadata();
         return distribution === ElasticsearchDistribution.elasticsearch && majorVersion === 6;
     }
-
     function isElasticsearch8() {
         const { distribution, majorVersion } = getClientMetadata();
         return distribution === ElasticsearchDistribution.elasticsearch && majorVersion === 8;
     }
-
     function isOpensearch2() {
         const { distribution, majorVersion } = getClientMetadata();
         return distribution === ElasticsearchDistribution.opensearch && majorVersion === 2;
     }
-
     function isOpensearch3() {
         const { distribution, majorVersion } = getClientMetadata();
         return distribution === ElasticsearchDistribution.opensearch && majorVersion === 3;
     }
-
     function _fixMappingRequest(_params, isTemplate) {
         if (!_params || !_params.body) {
             throw new Error('Invalid mapping request');
         }
         const params = cloneDeep(_params);
         const defaultParams = {};
-
         if (params.body.template != null) {
             if (isTemplate && params.body.index_patterns == null) {
                 params.body.index_patterns = castArray(params.body.template).slice();
             }
             delete params.body.template;
         }
-
         if (!isElasticsearch6()) {
             const typeMappings = get(params.body, 'mappings', {});
             if (typeMappings.properties) {
                 defaultParams.includeTypeName = false;
-            }
+            }
+            else {
                 defaultParams.includeTypeName = true;
                 Object.values(typeMappings).forEach((typeMapping) => {
                     if (typeMapping && typeMapping._all) {
@@ -1000,15 +833,12 @@ export default function elasticsearchApi(client, logger, _opConfig) {
                 });
             }
         }
-
-        if (isElasticsearch8(client) || isOpensearch2(client) || isOpensearch3(client)) {
+        if (isElasticsearch8() || isOpensearch2() || isOpensearch3()) {
             delete defaultParams.includeTypeName;
         }
-
         return Object.assign({}, defaultParams, params);
     }
-
-    function _migrate(index, migrantIndexName, mapping, recordType, clusterName) {
+    async function _migrate(index, migrantIndexName, mapping, recordType, clusterName) {
         const reindexQuery = {
             slices: 4,
             waitForCompletion: true,
@@ -1022,104 +852,86 @@ export default function elasticsearchApi(client, logger, _opConfig) {
             },
         },
     };
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            }
-        )
-
-
-
-
-
-
-
-
-
-
-            const error = new TSError(err, {
-                reason: `could not put alias for index: ${migrantIndexName}, name: ${index}`,
-            });
-            return Promise.reject(error);
-        }
-        ));
+        try {
+            const [docCount] = await Promise.all([
+                count({ index }),
+                // the empty string is not great, should maybe separate index creation logic
+                _createIndex(migrantIndexName, '', mapping, recordType, clusterName),
+            ]);
+            await _clientRequest('reindex', reindexQuery);
+            const newDocCount = await count({ index: migrantIndexName });
+            if (docCount !== newDocCount) {
+                const errMsg = `reindex error, index: ${migrantIndexName} only has ${docCount} docs, expected ${docCount} from index: ${index}`;
+                throw new Error(errMsg);
+            }
+        }
+        catch (err) {
+            throw new TSError(err, {
+                reason: `could not reindex for query ${JSON.stringify(reindexQuery)}`,
+                context: { connection },
+            });
+        }
+        try {
+            await _clientIndicesRequest('delete', { index });
+            await _clientIndicesRequest('putAlias', { index: migrantIndexName, name: index });
+        }
+        catch (err) {
+            const error = new TSError(err, {
+                reason: `could not put alias for index: ${migrantIndexName}, name: ${index}`,
+            });
+            throw error;
+        }
     }
-
-    function _createIndex(index, migrantIndexName, mapping, recordType, clusterName) {
+    async function _createIndex(index, migrantIndexName, mapping, recordType, clusterName) {
         const existQuery = { index };
-
-
-
-
-
-
-
-
-
-        return
-
-
-
-
-
-
-
-                reason: `Could not create index: ${index}`,
-            });
-            return Promise.reject(error);
-        }
-        return true;
+        const exists = await indexExists(existQuery);
+        if (!exists) {
+            // Make sure the index exists before we do anything else.
+            const createQuery = {
+                index,
+                body: mapping,
+            };
+            try {
+                await _sendTemplate(mapping, recordType, clusterName);
+                return indexCreate(createQuery);
+            }
+            catch (err) {
+                // It's not really an error if it's just that the index is already there
+                const errStr = parseError(err, true);
+                if (!errStr.includes('already_exists_exception')) {
+                    throw new TSError(err, {
+                        reason: `Could not create index: ${index}`,
                     });
+                }
             }
-
-
-
-
-
-
-
-
-                reason: `error while migrating index: ${existQuery.index}`,
-                fatalError: true,
-            });
-            return Promise.reject(error);
+        }
+        try {
+            await _checkAndUpdateMapping(clusterName, index, migrantIndexName, mapping, recordType);
+        }
+        catch (err) {
+            throw new TSError(err, {
+                reason: `error while migrating index: ${existQuery.index}`,
+                fatalError: true,
             });
-        }
-            .catch((err) => Promise.reject(err));
+        }
     }
-
-    function _verifyMapping(query, configMapping, recordType) {
+    async function _verifyMapping(query, configMapping, recordType) {
         const params = Object.assign({}, query);
-
         if (!isElasticsearch6()) {
             if (recordType) {
                 params.includeTypeName = true;
             }
         }
-
-
-
-
-
-
-
+        try {
+            const mapping = await _clientIndicesRequest('getMapping', params);
+            return _areSameMappings(configMapping, mapping, recordType);
+        }
+        catch (err) {
+            throw new TSError(err, {
+                reason: `could not get mapping for query ${JSON.stringify(params)}`,
            });
+        }
     }
-
     function _areSameMappings(configMapping, mapping, recordType) {
         const sysMapping = {};
         const index = Object.keys(mapping)[0];
@@ -1132,28 +944,23 @@ export default function elasticsearchApi(client, logger, _opConfig) {
         const areEqual = isDeepEqual(mapping, sysMapping);
         return { areEqual };
     }
-
-    function _checkAndUpdateMapping(clusterName, index, migrantIndexName, mapping, recordType) {
+    async function _checkAndUpdateMapping(clusterName, index, migrantIndexName, mapping, recordType) {
         if (index === migrantIndexName || migrantIndexName === null) {
-            const error = new TSError(
-                `index and migrant index names are the same: ${index}, please update the appropriate package.json version`
-            );
+            const error = new TSError(`index and migrant index names are the same: ${index}, please update the appropriate package.json version`);
             return Promise.reject(error);
         }
-
         const query = { index };
-
-
-
-
-
-
-
-
-
+        const results = await _verifyMapping(query, mapping, recordType);
+        if (results.areEqual)
+            return true;
+        // For state and analytics, we will not _migrate, but will post template so that
+        // the next index will have them
+        if (recordType === 'state' || recordType === 'analytics') {
+            return _sendTemplate(mapping, recordType, clusterName);
+        }
+        return _migrate(index, migrantIndexName, mapping, recordType, clusterName);
     }
-
-    function _sendTemplate(mapping, recordType, clusterName) {
+    async function _sendTemplate(mapping, recordType, clusterName) {
         if (mapping.template) {
             const name = `${clusterName}_${recordType}_template`;
             // setting template name to reflect current teraslice instance name to help prevent
@@ -1163,92 +970,68 @@ export default function elasticsearchApi(client, logger, _opConfig) {
         }
         return putTemplate(mapping, name);
     }
-
+        // TODO: this seems really silly
         return Promise.resolve(true);
     }
-
-    function indexSetup(
-        clusterName,
-        newIndex,
-        migrantIndexName,
-        mapping,
-        recordType,
-        clientName,
-        _time
-    ) {
+    async function indexSetup(clusterName, newIndex, migrantIndexName, mapping, recordType, clientName, _time) {
         const giveupAfter = Date.now() + (_time || 10000);
         return new Promise((resolve, reject) => {
+            // TODO: not going to touch this, needs a heavy refactor
+            // this contains the behavior for teraslice to continually wait
+            // until elasticsearch is available before making the store index
             const attemptToCreateIndex = () => {
                 _createIndex(newIndex, migrantIndexName, mapping, recordType, clusterName)
                     .then(() => isAvailable(newIndex))
                     .catch((err) => {
-
-
-
-
-
-                        newIndex,
-                        migrantIndexName,
-                        clusterName,
-                        connection,
-                    },
-                });
-
-                logger.error(error);
-
-                logger.info(`Attempting to connect to elasticsearch: ${clientName}`);
-                return _createIndex(
+                    if (isFatalError(err))
+                        return Promise.reject(err);
+                    const error = new TSError(err, {
+                        reason: 'Failure to create index',
+                        context: {
                            newIndex,
                            migrantIndexName,
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                logger.info(
-                    checkingError,
-                    `Attempting to connect to elasticsearch: ${clientName}`
-                );
-
-                return Promise.resolve();
-            })
-                .then(() => attemptToCreateIndex());
+                            clusterName,
+                            connection,
+                        },
+                    });
+                    logger.error(error);
+                    logger.info(`Attempting to connect to elasticsearch: ${clientName}`);
+                    return _createIndex(newIndex, migrantIndexName, mapping, recordType, clusterName)
+                        .then(() => {
+                        const query = { index: newIndex };
+                        return indexRecovery(query);
+                    })
+                        .then((results) => {
+                        let bool = false;
+                        if (Object.keys(results).length !== 0) {
+                            const isPrimary = results[newIndex].shards.filter((shard) => shard.primary === true);
+                            bool = isPrimary.every((shard) => shard.stage === 'DONE');
+                        }
+                        if (bool) {
+                            logger.info('connection to elasticsearch has been established');
+                            return isAvailable(newIndex);
+                        }
+                        return Promise.resolve();
+                    })
+                        .catch((_checkingError) => {
+                        if (Date.now() > giveupAfter) {
+                            const timeoutError = new TSError(`Unable to create index ${newIndex}`);
+                            return Promise.reject(timeoutError);
+                        }
+                        const checkingError = new TSError(_checkingError);
+                        logger.info(checkingError, `Attempting to connect to elasticsearch: ${clientName}`);
+                        return Promise.resolve();
                    })
+                        .then(() => attemptToCreateIndex());
+                })
                    .then(() => resolve(true))
                    .catch((err) => {
-
-
+                    reject(err);
+                });
            };
            attemptToCreateIndex();
        });
    }
-
    return {
        search,
        count,
@@ -1285,3 +1068,4 @@ export default function elasticsearchApi(client, logger, _opConfig) {
         isErrorRetryable
     };
 }
+//# sourceMappingURL=index.js.map