screwdriver-queue-service 2.0.15 → 2.0.19
This diff compares the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
- package/config/custom-environment-variables.yaml +19 -0
- package/config/default.yaml +20 -0
- package/lib/queue.js +1 -14
- package/package.json +6 -7
- package/plugins/helper.js +54 -1
- package/plugins/queue/put.js +3 -0
- package/plugins/queue/scheduler.js +31 -6
- package/plugins/worker/lib/jobs.js +120 -69
- package/plugins/worker/worker.js +1 -1
package/config/custom-environment-variables.yaml
CHANGED

@@ -272,3 +272,22 @@ scheduler:
         vhost: RABBITMQ_VHOST
         # Connection options
         connectOptions: RABBITMQ_CONNECT_OPTIONS
+kafka:
+    # flag for kafka broker
+    enabled: KAFKA_ENABLED
+    # kafka brokers list
+    hosts: KAFKA_BROKERS_LIST
+    # sasl options
+    sasl:
+        # sasl mechanism
+        mechanism: SASL_MECHANISM
+        # secret id for sasl/scram
+        secretId: SASL_AWS_SECRET_ID
+    # client id of the producer
+    clientId: KAFKA_CLIENT_ID
+    # Amazon access key
+    accessKeyId: KAFKA_ACCESS_KEY_ID
+    # Amazon secret access key
+    secretAccessKey: KAFKA_ACCESS_KEY_SECRET
+    # AWS region
+    region: AWS_REGION

package/config/default.yaml
CHANGED
@@ -202,3 +202,23 @@ scheduler:
         vhost: /screwdriver
         # Connect Options
         connectOptions: { json: true, heartbeatIntervalInSeconds: 20, reconnectTimeInSeconds: 30 }
+
+kafka:
+    # flag for kafka broker
+    enabled: false
+    # kafka brokers list
+    hosts: KAFKA_BROKERS_LIST
+    # sasl options
+    sasl:
+        # sasl mechanism
+        mechanism: scram-sha-512
+        # secret id for sasl/scram
+        secretId: fake-secret
+    # client id of the producer
+    clientId: sd-producer
+    # Amazon access key
+    accessKeyId: KAFKA_ACCESS_KEY_ID
+    # Amazon secret access key
+    secretAccessKey: KAFKA_ACCESS_KEY_SECRET
+    # AWS region
+    region: AWS_REGION
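
Both files introduce the same kafka block: custom-environment-variables.yaml maps each key to an environment variable, while default.yaml supplies the fallback values. Below is a minimal sketch of how a consumer could read these settings with the node-config package; only the config.get('kafka') lookup and the string comparison against 'true' are confirmed by the jobs.js diff further down, the rest is illustrative:

// Sketch: reading the new kafka block via node-config.
const config = require('config');

const kafkaConfig = config.get('kafka');
// Values overridden through environment variables arrive as strings,
// hence the comparison against 'true' rather than a boolean.
const kafkaEnabled = kafkaConfig.enabled === 'true';

if (kafkaEnabled) {
    // hosts, clientId, and sasl.* come from the block added above
    console.log(`kafka producer ${kafkaConfig.clientId} -> ${kafkaConfig.hosts}`);
}
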
package/lib/queue.js
CHANGED
@@ -35,6 +35,7 @@ module.exports = class ExecutorQueue {
         this.timeoutQueue = `${this.prefix}timeoutConfigs`;
         this.cacheQueue = `${this.prefix}cache`;
         this.unzipQueue = `${this.prefix}unzip`;
+        this.webhookQueue = `${this.prefix}webhooks`;

         const redisConnection = { ...config.redisConnection, pkg: 'ioredis' };

@@ -57,20 +58,6 @@ module.exports = class ExecutorQueue {
                 this.redis[funcName](...args),
             breakerOptions
         );
-        this.requestRetryStrategy = response => {
-            if (Math.floor(response.statusCode / 100) !== 2) {
-                throw new Error('Retry limit reached');
-            }
-
-            return response;
-        };
-        this.requestRetryStrategyPostEvent = response => {
-            if (Math.floor(response.statusCode / 100) !== 2 && response.statusCode !== 404) {
-                throw new Error('Retry limit reached');
-            }
-
-            return response;
-        };
         this.fuseBox = new FuseBox();
         this.fuseBox.addFuse(this.queueBreaker);
         this.fuseBox.addFuse(this.redisBreaker);
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
     "name": "screwdriver-queue-service",
-    "version": "2.0.15",
+    "version": "2.0.19",
     "description": "Screwdriver Queue Service API",
     "main": "app.js",
     "directories": {
@@ -28,7 +28,8 @@
     "node-resque": "^5.5.3",
     "npm-auto-version": "^1.0.0",
     "redlock": "^4.2.0",
-    "screwdriver-
+    "screwdriver-aws-producer-service": "^1.1.0",
+    "screwdriver-data-schema": "^21.10.2",
     "screwdriver-executor-docker": "^5.0.2",
     "screwdriver-executor-jenkins": "^5.0.1",
     "screwdriver-executor-k8s": "^14.14.4",
@@ -60,7 +61,8 @@
     "pretest": "eslint . --quiet",
     "test": "nyc --report-dir ./artifacts/coverage --reporter=lcov mocha --reporter mocha-multi-reporters --reporter-options configFile=./mocha.config.json --recursive --timeout 4000 --retries 1 --exit --allow-uncaught true --color true",
     "debug": "node --nolazy ./bin/server",
-    "functional": "cucumber-js --format=progress --tags 'not @ignore' --retry 2 --fail-fast --exit"
+    "functional": "cucumber-js --format=progress --tags 'not @ignore' --retry 2 --fail-fast --exit",
+    "semantic-release": "semantic-release"
 },
 "repository": {
     "type": "git",
@@ -85,9 +87,6 @@
     },
     "homepage": "https://github.com/screwdriver-cd/screwdriver-queue-service#readme",
     "release": {
-        "debug": false,
-        "verifyConditions": {
-            "path": "./node_modules/semantic-release/src/lib/plugin-noop.js"
-        }
+        "debug": false
     }
 }
package/plugins/helper.js
CHANGED
@@ -7,6 +7,34 @@ const { queuePrefix } = require('../config/redis');
 const RETRY_LIMIT = 3;
 const RETRY_DELAY = 5;

+/**
+ * Callback function to retry when HTTP status code is not 2xx
+ * @param  {Object}   response
+ * @param  {Function} retryWithMergedOptions
+ * @return {Object}   response
+ */
+function requestRetryStrategy(response) {
+    if (Math.floor(response.statusCode / 100) !== 2) {
+        throw new Error('Retry limit reached');
+    }
+
+    return response;
+}
+
+/**
+ * Callback function to retry when HTTP status code is not 2xx and 404
+ * @param  {Object}   response
+ * @param  {Function} retryWithMergedOptions
+ * @return {Object}   response
+ */
+function requestRetryStrategyPostEvent(response) {
+    if (Math.floor(response.statusCode / 100) !== 2 && response.statusCode !== 404) {
+        throw new Error('Retry limit reached');
+    }
+
+    return response;
+}
+
 /**
  *
  * @param {String} method
@@ -218,11 +246,36 @@ async function updateBuild(updateConfig, retryStrategyFn) {
     );
 }

+/**
+ * Post the webhooks process
+ * @method processHooks
+ * @param {String} apiUri
+ * @param {String} token
+ * @param {String} webhookConfig as JSON format
+ * @param {Function} retryStrategyFn
+ * @return {Promise} response or error
+ */
+async function processHooks(apiUri, token, webhookConfig, retryStrategyFn) {
+    return request(formatOptions('POST', `${apiUri}/v4/processHooks`, token, webhookConfig, retryStrategyFn)).then(
+        res => {
+            logger.info(`POST /v4/processHooks completed, ${res.statusCode}, ${JSON.stringify(res.body)}`);
+            if ([200, 201, 204].includes(res.statusCode)) {
+                return res;
+            }
+
+            throw new Error(`Failed to process webhook with ${res.statusCode} code and ${res.body}`);
+        }
+    );
+}
+
 module.exports = {
+    requestRetryStrategy,
+    requestRetryStrategyPostEvent,
     updateBuildStatus,
     updateStepStop,
     getCurrentStep,
     createBuildEvent,
     getPipelineAdmin,
-    updateBuild
+    updateBuild,
+    processHooks
 };
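
The two retry strategies removed from package/lib/queue.js above now live here as plain functions and are exported alongside the new processHooks helper. A hedged usage sketch follows; apiUri, token, and the payload shape are placeholder assumptions, while the signature matches the diff:

// Sketch: calling the new helper. The real caller is sendWebhook() in
// plugins/worker/lib/jobs.js further down this diff.
const helper = require('./plugins/helper');

async function forwardHook() {
    const apiUri = 'https://api.screwdriver.example'; // hypothetical
    const token = 'a-jwt-with-webhook_worker-scope';  // hypothetical
    const webhookConfig = { hookId: '12345' };        // hypothetical payload

    // requestRetryStrategyPostEvent retries every non-2xx response except 404
    return helper.processHooks(apiUri, token, webhookConfig, helper.requestRetryStrategyPostEvent);
}
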
package/plugins/queue/put.js
CHANGED
@@ -36,6 +36,9 @@ module.exports = () => ({
             case 'unzip':
                 await scheduler.unzipArtifacts(executor, request.payload);
                 break;
+            case 'webhook':
+                await scheduler.queueWebhook(executor, request.payload);
+                break;
             default:
                 await scheduler.start(executor, request.payload);
                 break;
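
The new 'webhook' case gives the queue API a third message type next to 'unzip' and the default build-start path. A hypothetical caller sketch is below; the route path and payload shape are assumptions, since this hunk only shows the switch branch:

// Sketch: the Screwdriver API deferring a webhook to the queue service.
const got = require('got'); // hypothetical HTTP client choice

async function enqueueWebhook(queueServiceUri, jwt, webhookEventPayload) {
    // request.payload ends up in scheduler.queueWebhook (see scheduler.js below)
    await got.put(`${queueServiceUri}/v4/queue/message?type=webhook`, {
        headers: { Authorization: `Bearer ${jwt}` },
        json: webhookEventPayload
    });
}
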
package/plugins/queue/scheduler.js
CHANGED

@@ -38,7 +38,7 @@ async function postBuildEvent(executor, eventConfig) {
         scope: ['user']
     });

-    const admin = await helper.getPipelineAdmin(token, apiUri, pipelineId,
+    const admin = await helper.getPipelineAdmin(token, apiUri, pipelineId, helper.requestRetryStrategy);

     if (admin) {
         logger.info(
@@ -64,7 +64,7 @@ async function postBuildEvent(executor, eventConfig) {
             buildEvent.buildId = buildId;
         }

-        await helper.createBuildEvent(apiUri, jwt, buildEvent,
+        await helper.createBuildEvent(apiUri, jwt, buildEvent, helper.requestRetryStrategyPostEvent);
     } else {
         logger.error(
             `POST event for pipeline failed as no admin found: ${pipelineId}:${job.name}:${job.id}:${buildId}`
@@ -337,7 +337,7 @@ async function start(executor, config) {
                         apiUri,
                         payload
                     },
-
+                    helper.requestRetryStrategy
                 )
                     .catch(err => {
                         logger.error(`frozenBuilds: failed to update build status for build ${buildId}: ${err}`);
@@ -399,7 +399,7 @@ async function start(executor, config) {
                 apiUri,
                 payload: { stats: build.stats, status: 'QUEUED' }
             },
-
+            helper.requestRetryStrategy
         );
     }
 }
@@ -652,7 +652,7 @@ async function stop(executor, config) {
 }

 /**
- * Cleanup any
+ * Cleanup any related processing
  */
 async function cleanUp(executor) {
     try {
@@ -716,6 +716,30 @@ async function unzipArtifacts(executor, config) {
     return enq;
 }

+/**
+ * Pushes webhooks to redis
+ * @async  queueWebhook
+ * @param  {Object} executor
+ * @param  {Object} webhookConfig
+ * @return {Promise}
+ */
+async function queueWebhook(executor, webhookConfig) {
+    await executor.connect();
+
+    return executor.queueBreaker.runCommand(
+        'enqueue',
+        executor.webhookQueue,
+        'sendWebhook',
+        JSON.stringify({
+            webhookConfig,
+            token: executor.tokenGen({
+                service: 'queue',
+                scope: ['webhook_worker']
+            })
+        })
+    );
+}
+
 module.exports = {
     init,
     start,
@@ -728,5 +752,6 @@ module.exports = {
     stopTimer,
     cleanUp,
     clearCache,
-    unzipArtifacts
+    unzipArtifacts,
+    queueWebhook
 };
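
queueWebhook defers webhook processing instead of handling it inline: the payload is enqueued through the circuit breaker onto the new webhooks queue together with a JWT scoped to webhook_worker. A sketch of the enqueued message, reconstructed from the hunk above (the token contents depend on executor.tokenGen):

// What ends up on the webhooks queue under the 'sendWebhook' job name:
const enqueuedArgument = JSON.stringify({
    webhookConfig, // the original hook payload, passed through untouched
    token: executor.tokenGen({
        service: 'queue',
        scope: ['webhook_worker'] // lets the worker call back into the API
    })
});

The worker-side counterpart, sendWebhook in lib/jobs.js below, JSON.parses this exact string.
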
package/plugins/worker/lib/jobs.js
CHANGED

@@ -6,6 +6,8 @@ const config = require('config');
 const hoek = require('@hapi/hoek');
 const ExecutorRouter = require('screwdriver-executor-router');
 const logger = require('screwdriver-logger');
+const AWSProducer = require('screwdriver-aws-producer-service');
+const helper = require('../../helper');
 const { BlockedBy } = require('./BlockedBy');
 const { Filter } = require('./Filter');
 const { CacheFilter } = require('./CacheFilter');
@@ -13,9 +15,10 @@ const blockedByConfig = config.get('plugins').blockedBy;
 const { connectionDetails, queuePrefix, runningJobsPrefix, waitingJobsPrefix } = require('../../../config/redis');
 const rabbitmqConf = require('../../../config/rabbitmq');
 const { amqpURI, exchange, connectOptions } = rabbitmqConf.getConfig();
+const kafkaEnabled = config.get('kafka').enabled === 'true';

 const RETRY_LIMIT = 3;
-// This is in milliseconds, reference: https://github.com/
+// This is in milliseconds, reference: https://github.com/actionhero/node-resque/blob/2ffdf0/lib/plugins/Retry.js#L12
 const RETRY_DELAY = 5 * 1000;
 const redis = new Redis(connectionDetails.port, connectionDetails.host, connectionDetails.options);

@@ -72,7 +75,7 @@ function getRabbitmqConn() {
     logger.info('Creating new rabbitmq connection.');

     rabbitmqConn.on('connect', () => logger.info('Connected to rabbitmq!'));
-    rabbitmqConn.on('disconnect', params => logger.info(
+    rabbitmqConn.on('disconnect', params => logger.info(`Disconnected from rabbitmq: ${params.err.stack}`));

     return rabbitmqConn;
 }
@@ -83,17 +86,23 @@ function getRabbitmqConn() {
  * @param {String} queue
  * @param {String} messageId
  */
-function pushToRabbitMq(message, queue, messageId) {
+async function pushToRabbitMq(message, queue, messageId) {
     if (!rabbitmqConf.getConfig().schedulerMode) {
         return Promise.resolve();
     }
-
+
+    const conn = getRabbitmqConn();
+    const channelWrapper = conn.createChannel({
         json: true,
         setup: channel => channel.checkExchange(exchange)
     });

     logger.info('publishing msg to rabbitmq: %s', messageId);

+    channelWrapper.on('error', (error, { name }) => {
+        logger.error(`channel wrapper error ${error}:${name}`);
+    });
+
     return channelWrapper
         .publish(exchange, queue, message, {
             contentType: 'application/json',
@@ -107,11 +116,24 @@ function pushToRabbitMq(message, queue, messageId) {
         .catch(err => {
             logger.error('publishing failed to rabbitmq: %s', err.message);
             channelWrapper.close();
-
+            conn.close();
             throw err;
         });
 }

+/**
+ * Push message to Kafka topic
+ * @param {Object} message Job and build config metadata
+ * @param {String} topic   Topic name
+ */
+async function pushToKafka(message, topic) {
+    const conn = await AWSProducer.connect();
+
+    if (conn) {
+        await AWSProducer.sendMessage(message, topic);
+    }
+}
+
 /**
  * Schedule a job based on mode
  * @method schedule
@@ -119,7 +141,7 @@ function pushToRabbitMq(message, queue, messageId) {
  * @param {Object} buildConfig build config
  * @return {Promise}
 */
-function schedule(job, buildConfig) {
+async function schedule(job, buildConfig) {
     const buildCluster = buildConfig.buildClusterName;

     delete buildConfig.buildClusterName;
@@ -129,8 +151,20 @@ function schedule(job, buildConfig) {
         buildConfig
     };

+    if (kafkaEnabled && buildConfig.provider) {
+        const { accountId, region } = buildConfig.provider;
+        const topic = `builds-${accountId}-${region}`;
+
+        return pushToKafka(msg, topic);
+    }
+
     if (rabbitmqConf.getConfig().schedulerMode) {
-
+        try {
+            return await pushToRabbitMq(msg, buildCluster, buildConfig.buildId);
+        } catch (err) {
+            logger.error(`err in pushing to rabbitmq: ${err}`);
+            throw err;
+        }
     }

     // token is not allowed in executor.stop
@@ -150,15 +184,17 @@ function schedule(job, buildConfig) {
  * @param {String} buildConfig.blockedBy Jobs that are blocking this job
  * @return {Promise}
 */
-function start(buildConfig) {
-
-        .hget(`${queuePrefix}buildConfigs`, buildConfig.buildId)
-        .then(fullBuildConfig => schedule('start', JSON.parse(fullBuildConfig)))
-        .catch(err => {
-            logger.error(`err in start job: ${err}`);
+async function start(buildConfig) {
+    try {
+        const fullBuildConfig = await redis.hget(`${queuePrefix}buildConfigs`, buildConfig.buildId);

-
-
+        await schedule('start', JSON.parse(fullBuildConfig));
+
+        return null;
+    } catch (err) {
+        logger.error(`err in start job: ${err}`);
+        throw err;
+    }
 }

 /**
@@ -169,79 +205,87 @@ function start(buildConfig) {
  * @param {String} buildConfig.jobId Job that this build belongs to
  * @param {String} buildConfig.blockedBy Jobs that are blocking this job
  * @param {String} buildConfig.started Whether job has started
+ * @param {String} buildConfig.jobName Job name
  * @return {Promise}
 */
-function stop(buildConfig) {
+async function stop(buildConfig) {
     const started = hoek.reach(buildConfig, 'started', { default: true }); // default value for backward compatibility
-    const { buildId, jobId } = buildConfig;
-
+    const { buildId, jobId, jobName } = buildConfig;
+    let stopConfig = { buildId, jobId, jobName };
     const runningKey = `${runningJobsPrefix}${jobId}`;

-
-    redis
-
-
-
+    try {
+        const fullBuildConfig = await redis.hget(`${queuePrefix}buildConfigs`, buildId);
+        const parsedConfig = JSON.parse(fullBuildConfig);
+
+        if (parsedConfig) {
+            stopConfig = {
+                buildId,
+                ...parsedConfig
+            };
+        }
+    } catch (err) {
+        logger.error(`[Stop Build] failed to get config for build ${buildId}: ${err.message}`);
+    }

-
-
-
+    await redis.hdel(`${queuePrefix}buildConfigs`, buildId);
+    // If this is a running job
+    const runningBuildId = await redis.get(runningKey);

-
-
-
+    if (parseInt(runningBuildId, 10) === buildId) {
+        await redis.del(runningKey);
+    }
+    // If this is a waiting job
+    await redis.lrem(`${waitingJobsPrefix}${jobId}`, 0, buildId);

-
-
-
-
-
-        .then(() => redis.hdel(`${queuePrefix}buildConfigs`, buildId))
-        // If this is a running job
-        .then(() => redis.get(runningKey))
-        .then(runningBuildId => {
-            if (parseInt(runningBuildId, 10) === buildId) {
-                return redis.del(runningKey);
-            }
-
-            return null;
-        })
-        // If this is a waiting job
-        .then(() => redis.lrem(`${waitingJobsPrefix}${jobId}`, 0, buildId))
-        .then(() => (started ? schedule('stop', stopConfig) : null))
-    );
+    if (started) {
+        await schedule('stop', stopConfig);
+    }
+
+    return null;
 }

 /**
  * Send message to clear cache from disk
  * @param {Object} cacheConfig
 */
-function clear(cacheConfig) {
+async function clear(cacheConfig) {
     const { id, buildClusters } = cacheConfig;
-
+    const data = await redis.hget(`${queuePrefix}buildConfigs`, id);

-
-    .
-    .then(data => {
-        if (data) {
-            const buildConfig = JSON.parse(data);
+    if (data) {
+        const buildConfig = JSON.parse(data);

-
-        }
-    })
-    .then(() => {
-        if (queueName) {
-            return pushToRabbitMq({ job: 'clear', cacheConfig }, queueName, id);
-        }
+        const queueName = buildConfig.buildClusterName;

-
-
-
-
-    }
+        if (queueName) {
+            await pushToRabbitMq({ job: 'clear', cacheConfig }, queueName, id);
+        }
+    }

-
-
+    if (buildClusters) {
+        await Promise.all(
+            buildClusters.map(async cluster => {
+                return pushToRabbitMq({ job: 'clear', cacheConfig }, cluster, id);
+            })
+        );
+    }
+
+    return null;
+}
+
+/**
+ * Send message to processHooks API
+ * @param {String} configs as String
+ */
+async function sendWebhook(configs) {
+    const parsedConfig = JSON.parse(configs);
+    const { webhookConfig, token } = parsedConfig;
+    const apiUri = ecosystem.api;
+
+    await helper.processHooks(apiUri, token, webhookConfig, helper.requestRetryStrategyPostEvent);
+
+    return null;
 }

 module.exports = {
@@ -266,5 +310,12 @@ module.exports = {
             Retry: retryOptions
         },
         perform: clear
+    },
+    sendWebhook: {
+        plugins: ['Retry'],
+        pluginOptions: {
+            Retry: retryOptions
+        },
+        perform: sendWebhook
     }
 };
package/plugins/worker/worker.js
CHANGED
@@ -42,7 +42,7 @@ async function shutDownAll(worker, scheduler) {
 const multiWorker = new NodeResque.MultiWorker(
     {
         connection: connectionDetails,
-        queues: [`${queuePrefix}builds`, `${queuePrefix}cache`],
+        queues: [`${queuePrefix}builds`, `${queuePrefix}cache`, `${queuePrefix}webhooks`],
         minTaskProcessors: workerConfig.minTaskProcessors,
         maxTaskProcessors: workerConfig.maxTaskProcessors,
         checkTimeout: workerConfig.checkTimeout,