screwdriver-queue-service 2.0.22 → 2.0.23
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +2 -2
- package/plugins/worker/lib/jobs.js +36 -7
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "screwdriver-queue-service",
-  "version": "2.0.22",
+  "version": "2.0.23",
   "description": "Screwdriver Queue Service API",
   "main": "app.js",
   "directories": {
@@ -29,7 +29,7 @@
     "npm-auto-version": "^1.0.0",
     "redlock": "^4.2.0",
     "screwdriver-aws-producer-service": "^1.3.0",
-    "screwdriver-data-schema": "^21.
+    "screwdriver-data-schema": "^21.17.0",
     "screwdriver-executor-docker": "^5.0.2",
     "screwdriver-executor-jenkins": "^5.0.1",
     "screwdriver-executor-k8s": "^14.16.0",
package/plugins/worker/lib/jobs.js
CHANGED
@@ -20,6 +20,7 @@ const { kafkaEnabled, useShortRegionName } = kafkaConfig.get();
 const RETRY_LIMIT = 3;
 // This is in milliseconds, reference: https://github.com/actionhero/node-resque/blob/2ffdf0/lib/plugins/Retry.js#L12
 const RETRY_DELAY = 5 * 1000;
+const DEFAULT_BUILD_TIMEOUT = 90;
 const redis = new Redis(connectionDetails.port, connectionDetails.host, connectionDetails.options);

 const ecosystem = config.get('ecosystem');
@@ -163,6 +164,35 @@ function getTopicName(accountId, region) {

     return `builds-${accountId}-${shortRegion}`;
 }
+
+/**
+ *
+ * @param {String} job type of job start|stop`
+ * @param {*} buildConfig
+ * @returns
+ */
+function getKafkaMessageRequest(job, buildConfig) {
+    const { accountId, region, executor: executorType } = buildConfig.provider;
+
+    const topic = getTopicName(accountId, region);
+    const messageId = `${job}-${buildConfig.buildId}`;
+
+    const timeout = parseInt(hoek.reach(buildConfig, 'annotations>screwdriver.cd/timeout', { separator: '>' }), 10);
+    const buildTimeout = Number.isNaN(timeout) ? DEFAULT_BUILD_TIMEOUT : timeout;
+
+    const message = {
+        job,
+        executorType,
+        buildConfig: {
+            ...buildConfig,
+            buildTimeout,
+            uiUri: ecosystem.ui,
+            storeUri: ecosystem.store
+        }
+    };
+
+    return { message, topic, messageId };
+}
 /**
  * Schedule a job based on mode
  * @method schedule
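The notable detail in the new getKafkaMessageRequest() helper is how the build timeout is resolved: the screwdriver.cd/timeout annotation is read with a '>' path separator (the annotation key itself contains dots), parsed as an integer, and DEFAULT_BUILD_TIMEOUT is used when the annotation is missing or not numeric. A minimal sketch of that fallback, assuming @hapi/hoek and an invented buildConfig shape:

```js
// Sketch only – illustrates the timeout fallback added in this diff.
// The buildConfig values are made up for illustration, not taken from the package.
const hoek = require('@hapi/hoek');

const DEFAULT_BUILD_TIMEOUT = 90;

function resolveBuildTimeout(buildConfig) {
    // '>' is used as the path separator because the annotation key contains dots.
    const timeout = parseInt(hoek.reach(buildConfig, 'annotations>screwdriver.cd/timeout', { separator: '>' }), 10);

    // parseInt(undefined, 10) is NaN, so a missing or non-numeric annotation falls back to the default.
    return Number.isNaN(timeout) ? DEFAULT_BUILD_TIMEOUT : timeout;
}

console.log(resolveBuildTimeout({ annotations: { 'screwdriver.cd/timeout': '120' } })); // 120
console.log(resolveBuildTimeout({ annotations: {} })); // 90
```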
@@ -175,18 +205,17 @@ async function schedule(job, buildConfig) {

     delete buildConfig.buildClusterName;

+    if (kafkaEnabled && buildConfig.provider) {
+        const { message, topic, messageId } = getKafkaMessageRequest(job, buildConfig);
+
+        return pushToKafka(message, topic, messageId);
+    }
+
     const msg = {
         job,
         buildConfig
     };

-    if (kafkaEnabled && buildConfig.provider) {
-        const { accountId, region } = buildConfig.provider;
-        const messageId = `${job}-${buildConfig.buildId}`;
-
-        return pushToKafka(msg, getTopicName(accountId, region), messageId);
-    }
-
     if (rabbitmqConf.getConfig().schedulerMode) {
         try {
             return await pushToRabbitMq(msg, buildCluster, buildConfig.buildId);