teraslice 0.87.1 → 0.88.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/cluster-service.js +24 -18
- package/dist/src/index.js +42 -0
- package/package.json +11 -15
- package/service.js +4 -6
- package/worker-service.js +6 -6
- package/index.js +0 -21
- package/lib/cluster/cluster_master.js +0 -164
- package/lib/cluster/node_master.js +0 -393
- package/lib/cluster/services/api.js +0 -581
- package/lib/cluster/services/assets.js +0 -211
- package/lib/cluster/services/cluster/backends/kubernetes/deployments/worker.hbs +0 -86
- package/lib/cluster/services/cluster/backends/kubernetes/index.js +0 -225
- package/lib/cluster/services/cluster/backends/kubernetes/jobs/execution_controller.hbs +0 -69
- package/lib/cluster/services/cluster/backends/kubernetes/k8s.js +0 -450
- package/lib/cluster/services/cluster/backends/kubernetes/k8sResource.js +0 -443
- package/lib/cluster/services/cluster/backends/kubernetes/k8sState.js +0 -67
- package/lib/cluster/services/cluster/backends/kubernetes/utils.js +0 -58
- package/lib/cluster/services/cluster/backends/native/index.js +0 -611
- package/lib/cluster/services/cluster/backends/native/messaging.js +0 -563
- package/lib/cluster/services/cluster/backends/state-utils.js +0 -49
- package/lib/cluster/services/cluster/index.js +0 -15
- package/lib/cluster/services/execution.js +0 -459
- package/lib/cluster/services/jobs.js +0 -303
- package/lib/config/default-sysconfig.js +0 -47
- package/lib/config/index.js +0 -32
- package/lib/config/schemas/system.js +0 -333
- package/lib/processors/save_file/index.js +0 -9
- package/lib/processors/save_file/processor.js +0 -17
- package/lib/processors/save_file/schema.js +0 -17
- package/lib/processors/script.js +0 -130
- package/lib/processors/stdout/index.js +0 -9
- package/lib/processors/stdout/processor.js +0 -19
- package/lib/processors/stdout/schema.js +0 -18
- package/lib/storage/analytics.js +0 -106
- package/lib/storage/assets.js +0 -275
- package/lib/storage/backends/elasticsearch_store.js +0 -567
- package/lib/storage/backends/mappings/analytics.json +0 -49
- package/lib/storage/backends/mappings/asset.json +0 -40
- package/lib/storage/backends/mappings/ex.json +0 -55
- package/lib/storage/backends/mappings/job.json +0 -31
- package/lib/storage/backends/mappings/state.json +0 -37
- package/lib/storage/execution.js +0 -331
- package/lib/storage/index.js +0 -16
- package/lib/storage/jobs.js +0 -97
- package/lib/storage/state.js +0 -302
- package/lib/utils/api_utils.js +0 -173
- package/lib/utils/asset_utils.js +0 -117
- package/lib/utils/date_utils.js +0 -58
- package/lib/utils/encoding_utils.js +0 -29
- package/lib/utils/events.js +0 -7
- package/lib/utils/file_utils.js +0 -118
- package/lib/utils/id_utils.js +0 -19
- package/lib/utils/port_utils.js +0 -83
- package/lib/workers/assets/loader.js +0 -109
- package/lib/workers/assets/spawn.js +0 -78
- package/lib/workers/context/execution-context.js +0 -16
- package/lib/workers/context/terafoundation-context.js +0 -10
- package/lib/workers/execution-controller/execution-analytics.js +0 -211
- package/lib/workers/execution-controller/index.js +0 -1033
- package/lib/workers/execution-controller/recovery.js +0 -188
- package/lib/workers/execution-controller/scheduler.js +0 -461
- package/lib/workers/execution-controller/slice-analytics.js +0 -115
- package/lib/workers/helpers/job.js +0 -93
- package/lib/workers/helpers/op-analytics.js +0 -22
- package/lib/workers/helpers/terafoundation.js +0 -43
- package/lib/workers/helpers/worker-shutdown.js +0 -187
- package/lib/workers/metrics/index.js +0 -139
- package/lib/workers/worker/index.js +0 -344
- package/lib/workers/worker/slice.js +0 -143
package/lib/storage/state.js
DELETED
|
@@ -1,302 +0,0 @@
|
|
|
1
|
-
'use strict';

const {
    RecoveryCleanupType
} = require('@terascope/job-components');
const {
    TSError,
    pRetry,
    toString,
    isRetryableError,
    parseErrorInfo,
    isTest,
    times,
    getFullErrorStack,
} = require('@terascope/utils');
const { timeseriesIndex } = require('../utils/date_utils');
const { makeLogger } = require('../workers/helpers/terafoundation');
const elasticsearchBackend = require('./backends/elasticsearch_store');

// Valid lifecycle states for a slice record.
const SliceState = Object.freeze({
    pending: 'pending',
    start: 'start',
    error: 'error',
    completed: 'completed',
});

// Module to manage job states in Elasticsearch.
// All functions in this module return promises that must be resolved to
// get the final result.
async function stateStorage(context) {
    const recordType = 'state';

    const logger = makeLogger(context, 'state_storage');
    const config = context.sysconfig.teraslice;
    const _index = `${config.name}__state`;
    // making this to pass down to backend for dynamic index searches
    const indexName = `${_index}*`;
    const timeseriesFormat = config.index_rollover_frequency.state;

    const backendConfig = {
        context,
        indexName,
        recordType,
        idField: 'slice_id',
        fullResponse: false,
        logRecord: true,
        forceRefresh: false,
        storageName: 'state'
    };

    const backend = await elasticsearchBackend(backendConfig);

    /**
     * Index a single slice record with the given state.
     *
     * @param {string} exId execution id the slice belongs to
     * @param {object} slice slice metadata (slice_id, slicer_id, request, ...)
     * @param {string} state one of SliceState
     * @param {*} [error] optional error to store on the record
     */
    async function createState(exId, slice, state, error) {
        await waitForClient();

        const { record, index } = _createSliceRecord(exId, slice, state, error);

        return backend.indexWithId(slice.slice_id, record, index);
    }

    /**
     * Bulk-index a batch of slices, all in the "pending" state.
     *
     * @param {string} exId
     * @param {object[]} slices
     */
    async function createSlices(exId, slices) {
        await waitForClient();

        const bulkRequest = slices.map((slice) => {
            const { record, index } = _createSliceRecord(exId, slice, SliceState.pending);
            return {
                action: {
                    index: {
                        _index: index,
                        _type: recordType,
                        _id: record.slice_id,
                    },
                },
                data: record
            };
        });

        return backend.bulkSend(bulkRequest);
    }

    // Build the storable record plus the timeseries index it belongs in.
    // Throws when given a state outside of SliceState.
    function _createSliceRecord(exId, slice, state, error) {
        if (!SliceState[state]) {
            throw new Error(`Unknown slice state "${state}" on create`);
        }
        const { index } = timeseriesIndex(timeseriesFormat, _index, slice._created);
        const record = {
            slice_id: slice.slice_id,
            slicer_id: slice.slicer_id,
            slicer_order: slice.slicer_order,
            request: JSON.stringify(slice.request),
            state,
            ex_id: exId,
            _created: slice._created,
            _updated: slice._created,
        };

        if (error) {
            record.error = toString(error);
        }
        return { record, index };
    }

    /**
     * Transition an existing slice record to a new state, retrying
     * aggressively on transient failures.
     *
     * @param {object} slice
     * @param {string} state one of SliceState
     * @param {Error} [error] stored on the record when present or when
     *                        transitioning to the error state
     */
    async function updateState(slice, state, error) {
        if (!SliceState[state]) {
            throw new Error(`Unknown slice state "${state}" on update`);
        }

        const indexData = timeseriesIndex(timeseriesFormat, _index, slice._created);
        const record = {
            _updated: indexData.timestamp,
            state
        };

        // it will usually just be error
        if (state === SliceState.error || error) {
            if (error) {
                record.error = getFullErrorStack(error);
            } else {
                record.error = new Error('Unknown slice error').stack;
            }
        }

        let notFoundErrCount = 0;

        async function update() {
            await waitForClient();

            try {
                return await backend.update(slice.slice_id, record, indexData.index);
            } catch (_err) {
                const { statusCode, message } = parseErrorInfo(_err);
                let retryable = isRetryableError(_err);
                if (statusCode === 404) {
                    // the record may not be visible yet; retry a few times
                    // before treating the missing document as fatal
                    notFoundErrCount++;
                    retryable = notFoundErrCount < 3;
                } else if (message.includes('Request Timeout')) {
                    retryable = true;
                }

                throw new TSError(_err, {
                    retryable,
                    reason: `Failure to update ${state} state`
                });
            }
        }

        return pRetry(update, {
            retries: 10000,
            delay: isTest ? 100 : 1000,
            backoff: 5,
            endWithFatal: true,
        });
    }

    /**
     * Get the starting position for the slicer
     *
     * @param {string} exId
     * @param {number} slicerId
     * @returns {Promise<import('@terascope/job-components').SlicerRecoveryData>}
     */
    async function _getSlicerStartingPoint(exId, slicerId) {
        const startQuery = `ex_id:"${exId}" AND slicer_id:"${slicerId}" AND state:${SliceState.completed}`;
        await waitForClient();

        try {
            const [slice] = await search(startQuery, 0, 1, 'slicer_order:desc');
            const recoveryData = {
                slicer_id: slicerId,
                lastSlice: undefined
            };

            if (slice) {
                recoveryData.lastSlice = JSON.parse(slice.request);
                // log the recovered request, not a non-existent property
                // on the raw search document
                logger.info(`last slice process for slicer_id ${slicerId}, ex_id: ${exId} is`, recoveryData.lastSlice);
            }

            return recoveryData;
        } catch (err) {
            throw new TSError(err, {
                reason: 'Failure getting the newest record'
            });
        }
    }

    /**
     * Get the starting positions for all of the slicers
     *
     * @param {string} exId
     * @param {number} slicers
     * @returns {Promise<import('@terascope/job-components').SlicerRecoveryData[]>}
     */
    async function getStartingPoints(exId, slicers) {
        const recoveredSlices = times(slicers, (i) => _getSlicerStartingPoint(exId, i));
        return Promise.all(recoveredSlices);
    }

    /**
     * @private
     * @param {string} exId
     * @param {number} slicerId
     * @param {import('@terascope/job-components').RecoveryCleanupType} [cleanupType]
     * @returns {string}
     */
    function _getRecoverSlicesQuery(exId, slicerId, cleanupType) {
        let query = `ex_id:"${exId}"`;
        if (slicerId !== -1) {
            query = `${query} AND slicer_id:"${slicerId}"`;
        }

        if (cleanupType && cleanupType === RecoveryCleanupType.errors) {
            query = `${query} AND state:"${SliceState.error}"`;
        } else if (cleanupType && cleanupType === RecoveryCleanupType.pending) {
            query = `${query} AND state:"${SliceState.pending}"`;
        } else {
            query = `${query} AND NOT state:"${SliceState.completed}"`;
        }
        logger.debug('recovery slices query:', query);
        return query;
    }

    /**
     * @param {string} exId
     * @param {number} slicerId
     * @param {import('@terascope/job-components').RecoveryCleanupType} [cleanupType]
     * @returns {Promise<import('@terascope/job-components').Slice[]>}
     */
    async function recoverSlices(exId, slicerId, cleanupType) {
        const query = _getRecoverSlicesQuery(exId, slicerId, cleanupType);
        // Look for all slices that haven't been completed so they can be retried.
        try {
            await waitForClient();
            await backend.refresh(indexName);

            const results = await search(query, 0, 5000, 'slicer_order:desc');
            return results.map((doc) => ({
                slice_id: doc.slice_id,
                slicer_id: doc.slicer_id,
                request: JSON.parse(doc.request),
                _created: doc._created
            }));
        } catch (err) {
            throw new TSError(err, {
                reason: 'Failure to get recovered slices'
            });
        }
    }

    async function search(query, from, size, sort, fields) {
        return backend.search(query, from, size, sort || '_updated:desc', fields);
    }

    async function count(query, from = 0, sort = '_updated:desc') {
        return backend.count(query, from, sort);
    }

    // Count the slices for an execution that are in a single state.
    async function countByState(exId, state) {
        if (!SliceState[state]) {
            throw new Error(`Unknown slice state "${state}" on countByState`);
        }
        const query = `ex_id:"${exId}" AND state:${state}`;
        return count(query);
    }

    async function shutdown(forceShutdown) {
        logger.info('shutting down');
        return backend.shutdown(forceShutdown);
    }

    async function refresh() {
        const { index } = timeseriesIndex(timeseriesFormat, _index);
        return backend.refresh(index);
    }

    function verifyClient() {
        return backend.verifyClient();
    }

    async function waitForClient() {
        return backend.waitForClient();
    }

    logger.info('state storage initialized');
    return {
        search,
        createState,
        createSlices,
        updateState,
        recoverSlices,
        getStartingPoints,
        count,
        countByState,
        waitForClient,
        verifyClient,
        shutdown,
        refresh,
    };
}

stateStorage.SliceState = SliceState;

module.exports = stateStorage;
|
package/lib/utils/api_utils.js
DELETED
|
@@ -1,173 +0,0 @@
|
|
|
1
|
-
'use strict';
|
|
2
|
-
|
|
3
|
-
const Table = require('easy-table');
|
|
4
|
-
const {
|
|
5
|
-
parseErrorInfo,
|
|
6
|
-
parseList,
|
|
7
|
-
logError,
|
|
8
|
-
isString,
|
|
9
|
-
get,
|
|
10
|
-
toInteger,
|
|
11
|
-
} = require('@terascope/utils');
|
|
12
|
-
|
|
13
|
-
function makeTable(req, defaults, data, mappingFn) {
|
|
14
|
-
const query = fieldsQuery(req.query, defaults);
|
|
15
|
-
let emptyChar = 'N/A';
|
|
16
|
-
|
|
17
|
-
// used to create an empty table if there are no jobs
|
|
18
|
-
if (data.length === 0) {
|
|
19
|
-
emptyChar = '';
|
|
20
|
-
data.push({});
|
|
21
|
-
}
|
|
22
|
-
|
|
23
|
-
return Table.print(data, (item, cell) => {
|
|
24
|
-
const fn = mappingFn ? mappingFn(item) : (field) => get(item, field, emptyChar);
|
|
25
|
-
query.forEach((field) => {
|
|
26
|
-
cell(field, fn(field));
|
|
27
|
-
});
|
|
28
|
-
}, (table) => {
|
|
29
|
-
if (('headers' in req.query) && req.query.headers === 'false') {
|
|
30
|
-
return table.print();
|
|
31
|
-
}
|
|
32
|
-
return table.toString();
|
|
33
|
-
});
|
|
34
|
-
}
|
|
35
|
-
|
|
36
|
-
function fieldsQuery(query, defaults) {
|
|
37
|
-
if (!query.fields) {
|
|
38
|
-
return defaults || [];
|
|
39
|
-
}
|
|
40
|
-
|
|
41
|
-
const results = parseList(query.fields);
|
|
42
|
-
|
|
43
|
-
if (results.length === 0) {
|
|
44
|
-
return defaults;
|
|
45
|
-
}
|
|
46
|
-
|
|
47
|
-
return results;
|
|
48
|
-
}
|
|
49
|
-
|
|
50
|
-
function handleRequest(req, res, defaultErrorMsg = 'Failure to process request', { errorCode = 500, successCode = 200 } = {}) {
|
|
51
|
-
logRequest(req);
|
|
52
|
-
return async (fn) => {
|
|
53
|
-
try {
|
|
54
|
-
const result = await fn();
|
|
55
|
-
if (isString(result)) {
|
|
56
|
-
res.status(successCode).send(result);
|
|
57
|
-
} else {
|
|
58
|
-
res.status(successCode).json(result);
|
|
59
|
-
}
|
|
60
|
-
} catch (err) {
|
|
61
|
-
const { statusCode, message } = parseErrorInfo(err, {
|
|
62
|
-
defaultErrorMsg,
|
|
63
|
-
defaultStatusCode: errorCode,
|
|
64
|
-
});
|
|
65
|
-
|
|
66
|
-
if (statusCode >= 500) {
|
|
67
|
-
logError(req.logger, err);
|
|
68
|
-
} else {
|
|
69
|
-
logError(req.logger, message);
|
|
70
|
-
}
|
|
71
|
-
|
|
72
|
-
sendError(res, statusCode, message, req.logger);
|
|
73
|
-
}
|
|
74
|
-
};
|
|
75
|
-
}
|
|
76
|
-
|
|
77
|
-
function sendError(res, code, message, logger) {
|
|
78
|
-
if (res.headersSent) {
|
|
79
|
-
const error = new Error(message);
|
|
80
|
-
error.statusCode = code;
|
|
81
|
-
if (logger) {
|
|
82
|
-
logger.error(error, 'request send error after headers sent');
|
|
83
|
-
} else {
|
|
84
|
-
console.error(error, 'request send error after headers sent');
|
|
85
|
-
}
|
|
86
|
-
return;
|
|
87
|
-
}
|
|
88
|
-
res.status(code).json({
|
|
89
|
-
error: code,
|
|
90
|
-
message
|
|
91
|
-
});
|
|
92
|
-
}
|
|
93
|
-
|
|
94
|
-
// NOTE: This only works for counters, if you're trying to extend this, you
|
|
95
|
-
// should probably switch to using prom-client.
|
|
96
|
-
function makePrometheus(stats, defaultLabels = {}) {
|
|
97
|
-
const metricMapping = {
|
|
98
|
-
processed: 'teraslice_slices_processed',
|
|
99
|
-
failed: 'teraslice_slices_failed',
|
|
100
|
-
queued: 'teraslice_slices_queued',
|
|
101
|
-
job_duration: '', // this isn't really useful, omitting
|
|
102
|
-
workers_joined: 'teraslice_workers_joined',
|
|
103
|
-
workers_disconnected: 'teraslice_workers_disconnected',
|
|
104
|
-
workers_reconnected: 'teraslice_workers_reconnected'
|
|
105
|
-
};
|
|
106
|
-
|
|
107
|
-
let returnString = '';
|
|
108
|
-
Object.entries(stats.controllers).forEach(([key, value]) => {
|
|
109
|
-
const name = metricMapping[key];
|
|
110
|
-
if (name !== '') {
|
|
111
|
-
returnString += `# TYPE ${name} counter\n`;
|
|
112
|
-
const labels = makePrometheusLabels(defaultLabels);
|
|
113
|
-
returnString += `${name}${labels} ${value}\n`;
|
|
114
|
-
}
|
|
115
|
-
});
|
|
116
|
-
return returnString;
|
|
117
|
-
}
|
|
118
|
-
|
|
119
|
-
function makePrometheusLabels(defaults, custom) {
|
|
120
|
-
const labels = Object.assign({}, defaults, custom);
|
|
121
|
-
const keys = Object.keys(labels);
|
|
122
|
-
if (!keys.length) return '';
|
|
123
|
-
|
|
124
|
-
const labelsStr = keys.map((key) => {
|
|
125
|
-
const val = labels[key];
|
|
126
|
-
return `${key}="${val}"`;
|
|
127
|
-
}).join(',');
|
|
128
|
-
|
|
129
|
-
return `{${labelsStr}}`;
|
|
130
|
-
}
|
|
131
|
-
|
|
132
|
-
function isPrometheusRequest(req) {
|
|
133
|
-
const acceptHeader = get(req, 'headers.accept', '');
|
|
134
|
-
return acceptHeader && acceptHeader.indexOf('application/openmetrics-text;') > -1;
|
|
135
|
-
}
|
|
136
|
-
|
|
137
|
-
/**
|
|
138
|
-
* @returns {number}
|
|
139
|
-
*/
|
|
140
|
-
function parseQueryInt(req, prop, defaultVal) {
|
|
141
|
-
const val = req.query[prop];
|
|
142
|
-
if (val == null || val === '') return defaultVal;
|
|
143
|
-
const parsed = toInteger(val);
|
|
144
|
-
// allow the invalid prop to be passed through
|
|
145
|
-
// (because an error should thrown downstream)
|
|
146
|
-
if (parsed === false) return req.query[prop];
|
|
147
|
-
return parsed;
|
|
148
|
-
}
|
|
149
|
-
|
|
150
|
-
function getSearchOptions(req, defaultSort = '_updated:desc') {
|
|
151
|
-
const sort = req.query.sort || defaultSort;
|
|
152
|
-
const size = parseQueryInt(req, 'size', 100);
|
|
153
|
-
const from = parseQueryInt(req, 'from', 0);
|
|
154
|
-
return { size, from, sort };
|
|
155
|
-
}
|
|
156
|
-
|
|
157
|
-
function logRequest(req) {
|
|
158
|
-
const queryInfo = Object.entries(req.query)
|
|
159
|
-
.map(([key, val]) => `${key}: ${val}`)
|
|
160
|
-
.join(', ');
|
|
161
|
-
const { method, path } = req;
|
|
162
|
-
req.logger.trace(`${method.toUpperCase()} ${path} endpoint has been called, ${queryInfo}`);
|
|
163
|
-
}
|
|
164
|
-
|
|
165
|
-
module.exports = {
|
|
166
|
-
isPrometheusRequest,
|
|
167
|
-
makePrometheus,
|
|
168
|
-
makeTable,
|
|
169
|
-
logRequest,
|
|
170
|
-
getSearchOptions,
|
|
171
|
-
handleRequest,
|
|
172
|
-
sendError
|
|
173
|
-
};
|
package/lib/utils/asset_utils.js
DELETED
|
@@ -1,117 +0,0 @@
|
|
|
1
|
-
'use strict';
|
|
2
|
-
|
|
3
|
-
const {
|
|
4
|
-
isInteger, trimStart, trim, getFirst, joinList
|
|
5
|
-
} = require('@terascope/utils');
|
|
6
|
-
const semver = require('semver');
|
|
7
|
-
|
|
8
|
-
function findMatchingAsset(records, name, version) {
|
|
9
|
-
const range = toSemverRange(version);
|
|
10
|
-
const assets = records
|
|
11
|
-
.filter(_isCompatibleAsset(name, range, false))
|
|
12
|
-
.sort((a, b) => semver.rcompare(a.version, b.version));
|
|
13
|
-
|
|
14
|
-
return getFirst(assets);
|
|
15
|
-
}
|
|
16
|
-
|
|
17
|
-
function findSimilarAssets(records, name, version) {
|
|
18
|
-
const range = toSemverRange(version);
|
|
19
|
-
const assets = records
|
|
20
|
-
.filter(_isCompatibleAsset(name, range, true))
|
|
21
|
-
.sort((a, b) => semver.rcompare(a.version, b.version));
|
|
22
|
-
|
|
23
|
-
return assets;
|
|
24
|
-
}
|
|
25
|
-
|
|
26
|
-
function getInCompatibilityReason(assets, prefix) {
|
|
27
|
-
if (!assets || !assets.length) return '';
|
|
28
|
-
|
|
29
|
-
const reasons = [];
|
|
30
|
-
|
|
31
|
-
assets.slice(0, 3).forEach((asset) => {
|
|
32
|
-
if (!isCompatibleNodeVersion(asset.node_version)) {
|
|
33
|
-
reasons.push('node_version');
|
|
34
|
-
}
|
|
35
|
-
if (asset.platform != null && asset.platform !== process.platform) {
|
|
36
|
-
reasons.push('platform');
|
|
37
|
-
}
|
|
38
|
-
if (asset.arch != null && asset.arch !== process.arch) {
|
|
39
|
-
reasons.push('arch');
|
|
40
|
-
}
|
|
41
|
-
});
|
|
42
|
-
|
|
43
|
-
if (!reasons.length) return '';
|
|
44
|
-
|
|
45
|
-
return `${prefix ? `${trim(prefix)} ` : ''}${joinList(reasons, ',', 'or')} mismatch`;
|
|
46
|
-
}
|
|
47
|
-
|
|
48
|
-
function getMajorVersion(version) {
|
|
49
|
-
if (version == null) return version;
|
|
50
|
-
if (isInteger(version)) return version;
|
|
51
|
-
return semver.major(version);
|
|
52
|
-
}
|
|
53
|
-
|
|
54
|
-
const SYSTEM_NODE_VERSION = getMajorVersion(process.version);
|
|
55
|
-
/**
|
|
56
|
-
* This just compares the major version
|
|
57
|
-
*/
|
|
58
|
-
function isCompatibleNodeVersion(version) {
|
|
59
|
-
if (version == null) return true;
|
|
60
|
-
|
|
61
|
-
// anything less than or equal to current node version
|
|
62
|
-
return getMajorVersion(version) <= SYSTEM_NODE_VERSION;
|
|
63
|
-
}
|
|
64
|
-
|
|
65
|
-
function _isCompatibleAsset(name, range, skipRestrictions = false) {
|
|
66
|
-
return (record) => {
|
|
67
|
-
if (record.name !== name) return false;
|
|
68
|
-
if (!semver.satisfies(record.version, range)) {
|
|
69
|
-
return false;
|
|
70
|
-
}
|
|
71
|
-
if (skipRestrictions) return true;
|
|
72
|
-
|
|
73
|
-
if (!isCompatibleNodeVersion(record.node_version)) {
|
|
74
|
-
return false;
|
|
75
|
-
}
|
|
76
|
-
if (record.arch != null && record.arch !== process.arch) {
|
|
77
|
-
return false;
|
|
78
|
-
}
|
|
79
|
-
if (record.platform != null && record.platform !== process.platform) {
|
|
80
|
-
return false;
|
|
81
|
-
}
|
|
82
|
-
return true;
|
|
83
|
-
};
|
|
84
|
-
}
|
|
85
|
-
|
|
86
|
-
function toSemverRange(version) {
|
|
87
|
-
if (!version || version === 'latest') return '*';
|
|
88
|
-
if (semver.validRange(version)) {
|
|
89
|
-
return trimStart(trim(version), 'v');
|
|
90
|
-
}
|
|
91
|
-
|
|
92
|
-
throw new Error(`Version "${version}" is not a valid semver range`);
|
|
93
|
-
}
|
|
94
|
-
|
|
95
|
-
function toVersionQuery(_version) {
|
|
96
|
-
const version = trimStart(trim(_version));
|
|
97
|
-
|
|
98
|
-
if (!version || version === 'latest' || version === '*') {
|
|
99
|
-
return 'version:*';
|
|
100
|
-
}
|
|
101
|
-
|
|
102
|
-
// if there a number and * next to each other that is not valid
|
|
103
|
-
// so lets just return the query: 12.34*.55
|
|
104
|
-
if (/\d\*/.test(version)) return `version:${version}`;
|
|
105
|
-
|
|
106
|
-
const range = new semver.Range(version);
|
|
107
|
-
return `version:${range.range.split(' ').join(' AND version:')}`;
|
|
108
|
-
}
|
|
109
|
-
|
|
110
|
-
module.exports = {
|
|
111
|
-
findSimilarAssets,
|
|
112
|
-
getInCompatibilityReason,
|
|
113
|
-
getMajorVersion,
|
|
114
|
-
findMatchingAsset,
|
|
115
|
-
toSemverRange,
|
|
116
|
-
toVersionQuery,
|
|
117
|
-
};
|
package/lib/utils/date_utils.js
DELETED
|
@@ -1,58 +0,0 @@
|
|
|
1
|
-
'use strict';
|
|
2
|
-
|
|
3
|
-
const { makeISODate } = require('@terascope/utils');
|
|
4
|
-
|
|
5
|
-
/**
 * Map a human time-unit descriptor (e.g. 'hr', 'minutes', 'mo') to its
 * canonical single-letter code ('y', 'M', 'w', 'd', 'h', 'm', 's', 'ms').
 * Matching is case-sensitive: 'M' is months, 'm' is minutes.
 *
 * @param {string} value the unit descriptor to normalize
 * @returns {string} the canonical unit code
 * @throws {Error} when the descriptor is not a recognized unit
 */
function dateOptions(value) {
    // A switch avoids the old plain-object lookup, which would resolve
    // inherited Object.prototype keys (e.g. 'constructor') to truthy
    // values instead of rejecting them.
    switch (value) {
        case 'year':
        case 'years':
        case 'y':
            return 'y';
        case 'months':
        case 'month':
        case 'mo':
        case 'mos':
        case 'M':
            return 'M';
        case 'weeks':
        case 'week':
        case 'wks':
        case 'wk':
        case 'w':
            return 'w';
        case 'days':
        case 'day':
        case 'd':
            return 'd';
        case 'hours':
        case 'hour':
        case 'hr':
        case 'hrs':
        case 'h':
            return 'h';
        case 'minutes':
        case 'minute':
        case 'min':
        case 'mins':
        case 'm':
            return 'm';
        case 'seconds':
        case 'second':
        case 's':
            return 's';
        case 'milliseconds':
        case 'millisecond':
        case 'ms':
            return 'ms';
        default:
            throw new Error(`the time descriptor of "${value}" for the interval is malformed`);
    }
}
|
|
47
|
-
|
|
48
|
-
function timeseriesIndex(timeseriesFormat, index, dateStr) {
|
|
49
|
-
const timestamp = makeISODate();
|
|
50
|
-
const formatter = { daily: 10, monthly: 7, yearly: 4 };
|
|
51
|
-
const dateString = dateStr || timestamp;
|
|
52
|
-
return { index: `${index}-${dateString.slice(0, formatter[timeseriesFormat]).replace(/-/g, '.')}`, timestamp };
|
|
53
|
-
}
|
|
54
|
-
|
|
55
|
-
module.exports = {
|
|
56
|
-
dateOptions,
|
|
57
|
-
timeseriesIndex
|
|
58
|
-
};
|
|
@@ -1,29 +0,0 @@
|
|
|
1
|
-
'use strict';

/**
 * Encode a value to a base64 string. Strings are encoded as-is;
 * object-like values (objects, arrays) are JSON-stringified first.
 *
 * @param {string|object} obj value to encode
 * @returns {string} base64-encoded representation
 * @throws {Error} when given something that is neither a string nor
 *                 object-like (previously this crashed with a confusing
 *                 `Buffer.from(undefined)` TypeError)
 */
function safeEncode(obj) {
    let str;
    // plain typeof checks replace the old lodash isString/isObjectLike
    if (typeof obj === 'string') {
        str = obj;
    } else if (obj !== null && typeof obj === 'object') {
        str = JSON.stringify(obj);
    } else {
        throw new Error(`Unable to encode ${obj}`);
    }
    return Buffer.from(str).toString('base64');
}

/**
 * Decode a base64 string produced by safeEncode back into a value via
 * JSON.parse. Object-like inputs are assumed already decoded and are
 * returned untouched.
 *
 * @param {string|object} str
 * @returns {*}
 * @throws {Error} when the input cannot be decoded/parsed
 */
function safeDecode(str) {
    if (str !== null && typeof str === 'object') {
        return str;
    }
    try {
        return JSON.parse(Buffer.from(str, 'base64').toString('utf-8'));
    } catch (err) {
        throw new Error(`Unable to decode ${str}`);
    }
}
|
|
25
|
-
|
|
26
|
-
// Public helpers for base64-safe encoding/decoding of payloads.
module.exports = {
    safeEncode,
    safeDecode
};
|