screwdriver-queue-service 2.0.13 → 2.0.17

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -272,3 +272,22 @@ scheduler:
         vhost: RABBITMQ_VHOST
         # Connection options
         connectOptions: RABBITMQ_CONNECT_OPTIONS
+kafka:
+    # flag for kafka broker
+    enabled: KAFKA_ENABLED
+    # kafka brokers list
+    hosts: KAFKA_BROKERS_LIST
+    # sasl options
+    sasl:
+        # sasl mechanism
+        mechanism: SASL_MECHANISM
+        # secret id for sasl/scram
+        secretId: SASL_AWS_SECRET_ID
+    # client id of the producer
+    clientId: KAFKA_CLIENT_ID
+    # Amazon access key
+    accessKeyId: KAFKA_ACCESS_KEY_ID
+    # Amazon secret access key
+    secretAccessKey: KAFKA_ACCESS_KEY_SECRET
+    # AWS region
+    region: AWS_REGION
@@ -202,3 +202,23 @@ scheduler:
         vhost: /screwdriver
         # Connect Options
         connectOptions: { json: true, heartbeatIntervalInSeconds: 20, reconnectTimeInSeconds: 30 }
+
+kafka:
+    # flag for kafka broker
+    enabled: false
+    # kafka brokers list
+    hosts: KAFKA_BROKERS_LIST
+    # sasl options
+    sasl:
+        # sasl mechanism
+        mechanism: scram-sha-512
+        # secret id for sasl/scram
+        secretId: fake-secret
+    # client id of the producer
+    clientId: sd-producer
+    # Amazon access key
+    accessKeyId: KAFKA_ACCESS_KEY_ID
+    # Amazon secret access key
+    secretAccessKey: KAFKA_ACCESS_KEY_SECRET
+    # AWS region
+    region: AWS_REGION
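
Both hunks introduce the same `kafka` block: the first maps each key to an environment variable, the second supplies local defaults (`enabled: false`, placeholder SASL values). A minimal sketch of how a consumer might read this block through the `config` package (key names come from the hunks above; everything else is an assumption):

// sketch: reading the new kafka block via node-config
const config = require('config');

const kafka = config.get('kafka');
// values injected from env vars arrive as strings, hence the string comparison used in scheduler.js below
const kafkaEnabled = kafka.enabled === 'true';
// KAFKA_BROKERS_LIST is presumably a delimited host list; splitting on ',' is an assumption
const brokers = String(kafka.hosts).split(',');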
package/lib/queue.js CHANGED
@@ -34,6 +34,8 @@ module.exports = class ExecutorQueue {
         this.userTokenGen = null;
         this.timeoutQueue = `${this.prefix}timeoutConfigs`;
         this.cacheQueue = `${this.prefix}cache`;
+        this.unzipQueue = `${this.prefix}unzip`;
+        this.webhookQueue = `${this.prefix}webhooks`;

         const redisConnection = { ...config.redisConnection, pkg: 'ioredis' };

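The two new properties follow the existing `timeoutConfigs`/`cache` pattern: each resque queue name is the configured prefix plus a fixed suffix. For example (prefix value hypothetical):

// with this.prefix === 'sd_' (hypothetical), the constructor above yields:
// this.unzipQueue   => 'sd_unzip'
// this.webhookQueue => 'sd_webhooks'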
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "screwdriver-queue-service",
-  "version": "2.0.13",
+  "version": "2.0.17",
   "description": "Screwdriver Queue Service API",
   "main": "app.js",
   "directories": {
@@ -28,10 +28,11 @@
     "node-resque": "^5.5.3",
     "npm-auto-version": "^1.0.0",
     "redlock": "^4.2.0",
-    "screwdriver-data-schema": "^21.2.5",
+    "screwdriver-aws-producer-service": "^1.1.0",
+    "screwdriver-data-schema": "^21.10.2",
     "screwdriver-executor-docker": "^5.0.2",
     "screwdriver-executor-jenkins": "^5.0.1",
-    "screwdriver-executor-k8s": "^14.10.0",
+    "screwdriver-executor-k8s": "^14.14.4",
     "screwdriver-executor-k8s-vm": "^4.3.2",
     "screwdriver-executor-router": "^2.1.2",
     "screwdriver-logger": "^1.0.2",
@@ -52,7 +53,7 @@
     "mockery": "^2.1.0",
     "nyc": "^15.1.0",
     "sinon": "^9.2.4",
-    "snyk": "^1.489.0",
+    "snyk": "^1.712.0",
     "util": "^0.12.2"
   },
   "scripts": {
@@ -60,7 +61,8 @@
     "pretest": "eslint . --quiet",
     "test": "nyc --report-dir ./artifacts/coverage --reporter=lcov mocha --reporter mocha-multi-reporters --reporter-options configFile=./mocha.config.json --recursive --timeout 4000 --retries 1 --exit --allow-uncaught true --color true",
     "debug": "node --nolazy ./bin/server",
-    "functional": "cucumber-js --format=progress --tags 'not @ignore' --retry 2 --fail-fast --exit"
+    "functional": "cucumber-js --format=progress --tags 'not @ignore' --retry 2 --fail-fast --exit",
+    "semantic-release": "semantic-release"
   },
   "repository": {
     "type": "git",
@@ -85,9 +87,6 @@
   },
   "homepage": "https://github.com/screwdriver-cd/screwdriver-queue-service#readme",
   "release": {
-    "debug": false,
-    "verifyConditions": {
-      "path": "./node_modules/semantic-release/src/lib/plugin-noop.js"
-    }
+    "debug": false
   }
 }
@@ -33,6 +33,12 @@ module.exports = () => ({
             case 'cache':
                 await scheduler.clearCache(executor, request.payload);
                 break;
+            case 'unzip':
+                await scheduler.unzipArtifacts(executor, request.payload);
+                break;
+            case 'webhook':
+                await scheduler.queueWebhook(executor, request.payload);
+                break;
             default:
                 await scheduler.start(executor, request.payload);
                 break;
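
The switch dispatches on a discriminator in the incoming request (not shown in this hunk), routing the two new message types to the scheduler functions added below. Hypothetical payloads for the new branches, matching what those functions read (only `buildId` is confirmed by the scheduler code; the webhook config is passed through opaquely):

// hypothetical request payloads for the new cases
const unzipRequest = { buildId: 12345 }; // -> scheduler.unzipArtifacts
const webhookRequest = { hookId: 'deadbeef', payload: { action: 'opened' } }; // -> scheduler.queueWebhook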
@@ -13,6 +13,7 @@ const RETRY_LIMIT = 3;
 const RETRY_DELAY = 5;
 const EXPIRE_TIME = 1800; // 30 mins
 const TEMPORAL_TOKEN_TIMEOUT = 12 * 60; // 12 hours in minutes
+const TEMPORAL_UNZIP_TOKEN_TIMEOUT = 2 * 60; // 2 hours in minutes

 /**
  * Posts a new build event to the API
@@ -651,7 +652,7 @@ async function stop(executor, config) {
 }

 /**
- * Cleanup any reladed processing
+ * Cleanup any related processing
  */
 async function cleanUp(executor) {
     try {
@@ -688,6 +689,51 @@ async function clearCache(executor, config) {
     }
 }

+/**
+ * Pushes a message to unzip artifacts
+ * @async unzipArtifacts
+ * @param {Object} executor
+ * @param {Object} config           Configuration
+ * @param {String} config.buildId   Unique ID for a build
+ * @return {Promise}
+ */
+async function unzipArtifacts(executor, config) {
+    await executor.connect();
+    const { buildId } = config;
+    const tokenConfig = {
+        username: buildId,
+        scope: 'unzip_worker'
+    };
+    const token = executor.tokenGen(tokenConfig, TEMPORAL_UNZIP_TOKEN_TIMEOUT);
+
+    const enq = await executor.queueBreaker.runCommand('enqueue', executor.unzipQueue, 'start', [
+        {
+            buildId,
+            token
+        }
+    ]);
+
+    return enq;
+}
+
+/**
+ * Pushes webhooks to redis
+ * @async queueWebhook
+ * @param {Object} executor
+ * @param {Object} webhookConfig
+ * @return {Promise}
+ */
+async function queueWebhook(executor, webhookConfig) {
+    await executor.connect();
+
+    return executor.queueBreaker.runCommand(
+        'enqueue',
+        executor.webhookQueue,
+        'sendWebhook',
+        JSON.stringify(webhookConfig)
+    );
+}
+
 module.exports = {
     init,
     start,
@@ -699,5 +745,7 @@ module.exports = {
     startTimer,
     stopTimer,
     cleanUp,
-    clearCache
+    clearCache,
+    unzipArtifacts,
+    queueWebhook
 };
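
`unzipArtifacts` mints a temporal token scoped to `unzip_worker` with the new two-hour timeout and enqueues a `start` job on the unzip queue; `queueWebhook` serializes its whole config before enqueueing, so the consuming worker must parse it back. A caller sketch, assuming an already-initialized executor (webhook fields hypothetical):

// sketch: assumes executor.connect/tokenGen/queueBreaker are wired as in the constructor above
await unzipArtifacts(executor, { buildId: 12345 });
// the unzip worker receives [{ buildId: 12345, token: '<jwt, unzip_worker scope, 120-min expiry>' }]

await queueWebhook(executor, { pipelineId: 99, payload: { action: 'opened' } }); // fields hypothetical
// the webhook worker receives a JSON string and must JSON.parse it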
@@ -6,6 +6,7 @@ const config = require('config');
 const hoek = require('@hapi/hoek');
 const ExecutorRouter = require('screwdriver-executor-router');
 const logger = require('screwdriver-logger');
+const AWSProducer = require('screwdriver-aws-producer-service');
 const { BlockedBy } = require('./BlockedBy');
 const { Filter } = require('./Filter');
 const { CacheFilter } = require('./CacheFilter');
@@ -13,6 +14,7 @@ const blockedByConfig = config.get('plugins').blockedBy;
 const { connectionDetails, queuePrefix, runningJobsPrefix, waitingJobsPrefix } = require('../../../config/redis');
 const rabbitmqConf = require('../../../config/rabbitmq');
 const { amqpURI, exchange, connectOptions } = rabbitmqConf.getConfig();
+const kafkaEnabled = config.get('kafka').enabled === 'true';

 const RETRY_LIMIT = 3;
 // This is in milliseconds, reference: https://github.com/taskrabbit/node-resque/blob/master/lib/plugins/Retry.js#L12
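
Note that `kafkaEnabled` is a string comparison evaluated once at module load: the flag is on only when `KAFKA_ENABLED` is exactly the string 'true'. The boolean `false` in default.yaml therefore never matches:

// 'true' === 'true' -> kafka path enabled
// false  === 'true' -> false (the YAML default keeps kafka off)
// 'TRUE' === 'true' -> false (the comparison is case-sensitive)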
@@ -72,7 +74,7 @@ function getRabbitmqConn() {
     logger.info('Creating new rabbitmq connection.');

     rabbitmqConn.on('connect', () => logger.info('Connected to rabbitmq!'));
-    rabbitmqConn.on('disconnect', params => logger.info('Disconnected from rabbitmq.', params.err.stack));
+    rabbitmqConn.on('disconnect', params => logger.info(`Disconnected from rabbitmq: ${params.err.stack}`));

     return rabbitmqConn;
 }
@@ -83,17 +85,23 @@ function getRabbitmqConn() {
  * @param {String} queue
  * @param {String} messageId
  */
-function pushToRabbitMq(message, queue, messageId) {
+async function pushToRabbitMq(message, queue, messageId) {
     if (!rabbitmqConf.getConfig().schedulerMode) {
         return Promise.resolve();
     }
-    const channelWrapper = getRabbitmqConn().createChannel({
+
+    const conn = getRabbitmqConn();
+    const channelWrapper = conn.createChannel({
         json: true,
         setup: channel => channel.checkExchange(exchange)
     });

     logger.info('publishing msg to rabbitmq: %s', messageId);

+    channelWrapper.on('error', (error, { name }) => {
+        logger.error(`channel wrapper error ${error}:${name}`);
+    });
+
     return channelWrapper
         .publish(exchange, queue, message, {
             contentType: 'application/json',
@@ -107,11 +115,24 @@ function pushToRabbitMq(message, queue, messageId) {
         .catch(err => {
             logger.error('publishing failed to rabbitmq: %s', err.message);
             channelWrapper.close();
-
+            conn.close();
             throw err;
         });
 }

+/**
+ * Push message to Kafka topic
+ * @param {Object} message  Job and build config metadata
+ * @param {String} topic    Topic name
+ */
+async function pushToKafka(message, topic) {
+    const conn = await AWSProducer.connect();
+
+    if (conn) {
+        await AWSProducer.sendMessage(message, topic);
+    }
+}
+
 /**
  * Schedule a job based on mode
  * @method schedule
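
`pushToKafka` uses only two calls from screwdriver-aws-producer-service, `connect()` and `sendMessage(message, topic)`, and silently drops the message when `connect()` resolves falsy, unlike the rabbitmq path, which closes the connection and rethrows. A hedged sketch of a stricter variant, if a caller wanted kafka failures surfaced the same way (`pushToKafkaStrict` is hypothetical, not part of this release):

// sketch: the same two library calls, but unavailability becomes an error instead of a no-op
async function pushToKafkaStrict(message, topic) {
    const conn = await AWSProducer.connect();

    if (!conn) {
        throw new Error(`kafka producer unavailable for topic ${topic}`);
    }

    await AWSProducer.sendMessage(message, topic);
}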
@@ -119,7 +140,7 @@ function pushToRabbitMq(message, queue, messageId) {
  * @param {Object} buildConfig build config
  * @return {Promise}
  */
-function schedule(job, buildConfig) {
+async function schedule(job, buildConfig) {
     const buildCluster = buildConfig.buildClusterName;

     delete buildConfig.buildClusterName;
@@ -129,8 +150,20 @@ function schedule(job, buildConfig) {
         buildConfig
     };

+    if (kafkaEnabled && buildConfig.provider) {
+        const { accountId, region } = buildConfig.provider;
+        const topic = `builds-${accountId}-${region}`;
+
+        return pushToKafka(msg, topic);
+    }
+
     if (rabbitmqConf.getConfig().schedulerMode) {
-        return pushToRabbitMq(msg, buildCluster, buildConfig.buildId);
+        try {
+            return await pushToRabbitMq(msg, buildCluster, buildConfig.buildId);
+        } catch (err) {
+            logger.error(`err in pushing to rabbitmq: ${err}`);
+            throw err;
+        }
     }

     // token is not allowed in executor.stop
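
When kafka is enabled and the build config carries a `provider`, the kafka branch short-circuits before any rabbitmq logic, and the topic name is derived directly from the provider. For example (values hypothetical):

// buildConfig.provider = { accountId: '123456789012', region: 'us-west-2' } // hypothetical
// topic => 'builds-123456789012-us-west-2'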
@@ -150,15 +183,17 @@ function schedule(job, buildConfig) {
  * @param {String} buildConfig.blockedBy Jobs that are blocking this job
  * @return {Promise}
  */
-function start(buildConfig) {
-    return redis
-        .hget(`${queuePrefix}buildConfigs`, buildConfig.buildId)
-        .then(fullBuildConfig => schedule('start', JSON.parse(fullBuildConfig)))
-        .catch(err => {
-            logger.error(`err in start job: ${err}`);
-
-            return Promise.reject(err);
-        });
+async function start(buildConfig) {
+    try {
+        const fullBuildConfig = await redis.hget(`${queuePrefix}buildConfigs`, buildConfig.buildId);
+
+        await schedule('start', JSON.parse(fullBuildConfig));
+
+        return null;
+    } catch (err) {
+        logger.error(`err in start job: ${err}`);
+        throw err;
+    }
 }

 /**
@@ -169,79 +204,73 @@ function start(buildConfig) {
  * @param {String} buildConfig.jobId     Job that this build belongs to
  * @param {String} buildConfig.blockedBy Jobs that are blocking this job
  * @param {String} buildConfig.started   Whether job has started
+ * @param {String} buildConfig.jobName   Job name
  * @return {Promise}
  */
-function stop(buildConfig) {
+async function stop(buildConfig) {
     const started = hoek.reach(buildConfig, 'started', { default: true }); // default value for backward compatibility
-    const { buildId, jobId } = buildConfig;
-    const stopConfig = { buildId };
+    const { buildId, jobId, jobName } = buildConfig;
+    let stopConfig = { buildId, jobId, jobName };
     const runningKey = `${runningJobsPrefix}${jobId}`;

-    return (
-        redis
-            .hget(`${queuePrefix}buildConfigs`, buildId)
-            .then(fullBuildConfig => {
-                const parsedConfig = JSON.parse(fullBuildConfig);
+    try {
+        const fullBuildConfig = await redis.hget(`${queuePrefix}buildConfigs`, buildId);
+        const parsedConfig = JSON.parse(fullBuildConfig);
+
+        if (parsedConfig) {
+            stopConfig = {
+                buildId,
+                ...parsedConfig
+            };
+        }
+    } catch (err) {
+        logger.error(`[Stop Build] failed to get config for build ${buildId}: ${err.message}`);
+    }

-                if (parsedConfig && parsedConfig.annotations) {
-                    stopConfig.annotations = parsedConfig.annotations;
-                }
+    await redis.hdel(`${queuePrefix}buildConfigs`, buildId);
+    // If this is a running job
+    const runningBuildId = await redis.get(runningKey);

-                if (parsedConfig && parsedConfig.buildClusterName) {
-                    stopConfig.buildClusterName = parsedConfig.buildClusterName;
-                }
+    if (parseInt(runningBuildId, 10) === buildId) {
+        await redis.del(runningKey);
+    }
+    // If this is a waiting job
+    await redis.lrem(`${waitingJobsPrefix}${jobId}`, 0, buildId);

-                stopConfig.token = parsedConfig.token;
-            })
-            .catch(err => {
-                logger.error(`[Stop Build] failed to get config for build ${buildId}: ${err.message}`);
-            })
-            .then(() => redis.hdel(`${queuePrefix}buildConfigs`, buildId))
-            // If this is a running job
-            .then(() => redis.get(runningKey))
-            .then(runningBuildId => {
-                if (parseInt(runningBuildId, 10) === buildId) {
-                    return redis.del(runningKey);
-                }
-
-                return null;
-            })
-            // If this is a waiting job
-            .then(() => redis.lrem(`${waitingJobsPrefix}${jobId}`, 0, buildId))
-            .then(() => (started ? schedule('stop', stopConfig) : null))
-    );
+    if (started) {
+        await schedule('stop', stopConfig);
+    }
+
+    return null;
 }

 /**
  * Send message to clear cache from disk
  * @param {Object} cacheConfig
  */
-function clear(cacheConfig) {
+async function clear(cacheConfig) {
     const { id, buildClusters } = cacheConfig;
-    let queueName;
+    const data = await redis.hget(`${queuePrefix}buildConfigs`, id);

-    return redis
-        .hget(`${queuePrefix}buildConfigs`, id)
-        .then(data => {
-            if (data) {
-                const buildConfig = JSON.parse(data);
+    if (data) {
+        const buildConfig = JSON.parse(data);

-                queueName = buildConfig.buildClusterName;
-            }
-        })
-        .then(() => {
-            if (queueName) {
-                return pushToRabbitMq({ job: 'clear', cacheConfig }, queueName, id);
-            }
+        const queueName = buildConfig.buildClusterName;
+
+        if (queueName) {
+            await pushToRabbitMq({ job: 'clear', cacheConfig }, queueName, id);
+        }
+    }

-            if (buildClusters) {
-                return Promise.all(
-                    buildClusters.map(cluster => pushToRabbitMq({ job: 'clear', cacheConfig }, cluster, id))
-                );
-            }
+    if (buildClusters) {
+        await Promise.all(
+            buildClusters.map(async cluster => {
+                return pushToRabbitMq({ job: 'clear', cacheConfig }, cluster, id);
+            })
+        );
+    }

-            return null;
-        });
+    return null;
 }

 module.exports = {
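
The rewritten `stop` also changes what gets forwarded to `schedule('stop', ...)`: instead of copying `annotations`, `buildClusterName`, and `token` field by field, it spreads the entire cached build config over `{ buildId }`, falling back to `{ buildId, jobId, jobName }` when no cached config exists. A sketch of the resulting object (field values hypothetical):

// with a cached config: every stored field is forwarded
// stopConfig = { buildId: 12345, ...parsedConfig }
//            = { buildId: 12345, jobId: 678, annotations: {...}, buildClusterName: 'q1', token: '<jwt>', ... }
// without one: stopConfig = { buildId: 12345, jobId: 678, jobName: 'main' }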