mocha 7.1.1 → 8.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,174 @@
+ /**
+ * A wrapper around a third-party child process worker pool implementation.
+ * Used by {@link module:buffered-runner}.
+ * @private
+ * @module buffered-worker-pool
+ */
+
+ 'use strict';
+
+ const serializeJavascript = require('serialize-javascript');
+ const workerpool = require('workerpool');
+ const {deserialize} = require('./serializer');
+ const debug = require('debug')('mocha:parallel:buffered-worker-pool');
+ const {createInvalidArgumentTypeError} = require('../errors');
+
+ const WORKER_PATH = require.resolve('./worker.js');
+
+ /**
+ * A mapping of Mocha `Options` objects to serialized values.
+ *
+ * This is helpful because we tend to send the same options over and over
+ * over IPC.
+ * @type {WeakMap<Options,string>}
+ */
+ let optionsCache = new WeakMap();
+
+ /**
+ * These options are passed into the [workerpool](https://npm.im/workerpool) module.
+ * @type {Partial<WorkerPoolOptions>}
+ */
+ const WORKER_POOL_DEFAULT_OPTS = {
+ // use child processes, not worker threads!
+ workerType: 'process',
+ // ensure the same flags sent to `node` for this `mocha` invocation are passed
+ // along to children
+ forkOpts: {execArgv: process.execArgv},
+ maxWorkers: workerpool.cpus - 1
+ };
+
+ /**
+ * A wrapper around a third-party worker pool implementation.
+ * @private
+ */
+ class BufferedWorkerPool {
+ /**
+ * Creates an underlying worker pool instance; determines max worker count
+ * @param {Partial<WorkerPoolOptions>} [opts] - Options
+ */
+ constructor(opts = {}) {
+ const maxWorkers = Math.max(
+ 1,
+ typeof opts.maxWorkers === 'undefined'
+ ? WORKER_POOL_DEFAULT_OPTS.maxWorkers
+ : opts.maxWorkers
+ );
+
+ /* istanbul ignore next */
+ if (workerpool.cpus < 2) {
+ // TODO: decide whether we should warn
+ debug(
+ 'not enough CPU cores available to run multiple jobs; avoid --parallel on this machine'
+ );
+ } else if (maxWorkers >= workerpool.cpus) {
+ // TODO: decide whether we should warn
+ debug(
+ '%d concurrent job(s) requested, but only %d core(s) available',
+ maxWorkers,
+ workerpool.cpus
+ );
+ }
+ /* istanbul ignore next */
+ debug(
+ 'run(): starting worker pool of max size %d, using node args: %s',
+ maxWorkers,
+ process.execArgv.join(' ')
+ );
+
+ this.options = Object.assign({}, WORKER_POOL_DEFAULT_OPTS, opts, {
+ maxWorkers
+ });
+ this._pool = workerpool.pool(WORKER_PATH, this.options);
+ }
+
+ /**
+ * Terminates all workers in the pool.
+ * @param {boolean} [force] - Whether to force-kill workers. By default, lets workers finish their current task before termination.
+ * @private
+ * @returns {Promise<void>}
+ */
+ async terminate(force = false) {
+ /* istanbul ignore next */
+ debug('terminate(): terminating with force = %s', force);
+ return this._pool.terminate(force);
+ }
+
+ /**
+ * Adds a test file run to the worker pool queue for execution by a worker process.
+ *
+ * Handles serialization/deserialization.
+ *
+ * @param {string} filepath - Filepath of test
+ * @param {Options} [options] - Options for Mocha instance
+ * @private
+ * @returns {Promise<SerializedWorkerResult>}
+ */
+ async run(filepath, options = {}) {
+ if (!filepath || typeof filepath !== 'string') {
+ throw createInvalidArgumentTypeError(
+ 'Expected a non-empty filepath',
+ 'filepath',
+ 'string'
+ );
+ }
+ const serializedOptions = BufferedWorkerPool.serializeOptions(options);
+ const result = await this._pool.exec('run', [filepath, serializedOptions]);
+ return deserialize(result);
+ }
+
+ /**
+ * Returns stats about the state of the worker processes in the pool.
+ *
+ * Used for debugging.
+ *
+ * @private
+ */
+ stats() {
+ return this._pool.stats();
+ }
+
+ /**
+ * Instantiates a {@link BufferedWorkerPool}.
+ * @private
+ */
+ static create(...args) {
+ return new BufferedWorkerPool(...args);
+ }
+
+ /**
+ * Given Mocha options object `opts`, serialize into a format suitable for
+ * transmission over IPC.
+ *
+ * @param {Options} [opts] - Mocha options
+ * @private
+ * @returns {string} Serialized options
+ */
+ static serializeOptions(opts = {}) {
+ if (!optionsCache.has(opts)) {
+ const serialized = serializeJavascript(opts, {
+ unsafe: true, // this means we don't care about XSS
+ ignoreFunction: true // do not serialize functions
+ });
+ optionsCache.set(opts, serialized);
+ /* istanbul ignore next */
+ debug(
+ 'serializeOptions(): serialized options %O to: %s',
+ opts,
+ serialized
+ );
+ }
+ return optionsCache.get(opts);
+ }
+
+ /**
+ * Resets internal cache of serialized options objects.
+ *
+ * For testing/debugging
+ * @private
+ */
+ static resetOptionsCache() {
+ optionsCache = new WeakMap();
+ }
+ }
+
+ exports.BufferedWorkerPool = BufferedWorkerPool;
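
A minimal usage sketch of the buffered-worker-pool module added above (an editorial illustration, not part of the package contents). The {failureCount, events} result shape is taken from how parallel-buffered-runner consumes pool.run() further down in this diff; the spec path and options are placeholders.

// Sketch only: result shape assumed from the parallel runner below;
// the spec path and Mocha options here are placeholders.
const {BufferedWorkerPool} = require('./buffered-worker-pool');

async function runSingleFile() {
  // create() simply forwards its arguments to the constructor
  const pool = BufferedWorkerPool.create({maxWorkers: 2});
  try {
    // run() serializes the options, executes the file in a worker process
    // via workerpool, and deserializes the buffered result
    const {failureCount, events} = await pool.run('./example.spec.js', {});
    console.log('%d failure(s), %d buffered event(s)', failureCount, events.length);
  } finally {
    // allow the worker to finish its current task, then shut the pool down
    await pool.terminate();
  }
}

runSingleFile().catch(console.error);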
@@ -8,7 +8,8 @@
  const os = require('os');
  const path = require('path');
  const {sync: which} = require('which');
- const {EVENT_RUN_END} = require('./runner').constants;
+ const {EVENT_RUN_END} = require('../runner').constants;
+ const {isBrowser} = require('../utils');

  /**
  * @summary
@@ -25,7 +26,7 @@ const {EVENT_RUN_END} = require('./runner').constants;
  * @return {boolean} whether Growl notification support can be expected
  */
  exports.isCapable = () => {
- if (!process.browser) {
+ if (!isBrowser()) {
  return getSupportBinaries().reduce(
  (acc, binary) => acc || Boolean(which(binary, {nothrow: true})),
  false
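
The require path change (./runner to ../runner) suggests the growl module moved one directory deeper in 8.0, and the inline process.browser check is replaced with a shared isBrowser() helper from utils. The helper itself is not shown in this excerpt; a plausible shape, stated as an assumption:

// Assumed shape of the shared helper (not shown in this diff). Bundlers such as
// browserify define `process.browser` in browser builds, so this mirrors the
// old inline check.
exports.isBrowser = () => Boolean(process.browser);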
@@ -0,0 +1,293 @@
+ /**
+ * A test Runner that uses a {@link module:buffered-worker-pool}.
+ * @module parallel-buffered-runner
+ * @private
+ */
+
+ 'use strict';
+
+ const allSettled = require('promise.allsettled');
+ const Runner = require('../runner');
+ const {EVENT_RUN_BEGIN, EVENT_RUN_END} = Runner.constants;
+ const debug = require('debug')('mocha:parallel:parallel-buffered-runner');
+ const {BufferedWorkerPool} = require('./buffered-worker-pool');
+ const {setInterval, clearInterval} = global;
+ const {createMap} = require('../utils');
+
+ /**
+ * Outputs a debug statement with worker stats
+ * @param {BufferedWorkerPool} pool - Worker pool
+ */
+ /* istanbul ignore next */
+ const debugStats = pool => {
+ const {totalWorkers, busyWorkers, idleWorkers, pendingTasks} = pool.stats();
+ debug(
+ '%d/%d busy workers; %d idle; %d tasks queued',
+ busyWorkers,
+ totalWorkers,
+ idleWorkers,
+ pendingTasks
+ );
+ };
+
+ /**
+ * The interval at which we will display stats for worker processes in debug mode
+ */
+ const DEBUG_STATS_INTERVAL = 5000;
+
+ const ABORTED = 'ABORTED';
+ const IDLE = 'IDLE';
+ const ABORTING = 'ABORTING';
+ const RUNNING = 'RUNNING';
+ const BAILING = 'BAILING';
+ const BAILED = 'BAILED';
+ const COMPLETE = 'COMPLETE';
+
+ const states = createMap({
+ [IDLE]: new Set([RUNNING, ABORTING]),
+ [RUNNING]: new Set([COMPLETE, BAILING, ABORTING]),
+ [COMPLETE]: new Set(),
+ [ABORTED]: new Set(),
+ [ABORTING]: new Set([ABORTED]),
+ [BAILING]: new Set([BAILED, ABORTING]),
+ [BAILED]: new Set([COMPLETE, ABORTING])
+ });
+
+ /**
+ * This `Runner` delegates test runs to worker processes. Does not execute any
+ * {@link Runnable}s by itself!
+ * @private
+ */
+ class ParallelBufferedRunner extends Runner {
+ constructor(...args) {
+ super(...args);
+
+ let state = IDLE;
+ Object.defineProperty(this, '_state', {
+ get() {
+ return state;
+ },
+ set(newState) {
+ if (states[state].has(newState)) {
+ state = newState;
+ } else {
+ throw new Error(`invalid state transition: ${state} => ${newState}`);
+ }
+ }
+ });
+
+ this.once(Runner.constants.EVENT_RUN_END, () => {
+ this._state = COMPLETE;
+ });
+ }
+
+ /**
+ * Returns a mapping function to enqueue a file in the worker pool and return results of its execution.
+ * @param {BufferedWorkerPool} pool - Worker pool
+ * @param {Options} options - Mocha options
+ * @returns {FileRunner} Mapping function
+ */
+ _createFileRunner(pool, options) {
+ return async file => {
+ debug('run(): enqueueing test file %s', file);
+ try {
+ const {failureCount, events} = await pool.run(file, options);
+ if (this._state === BAILED) {
+ // short-circuit after a graceful bail. if this happens,
+ // some other worker has bailed.
+ // TODO: determine if this is the desired behavior, or if we
+ // should report the events of this run anyway.
+ return;
+ }
+ debug(
+ 'run(): completed run of file %s; %d failures / %d events',
+ file,
+ failureCount,
+ events.length
+ );
+ this.failures += failureCount; // can this ever be non-numeric?
+ let event = events.shift();
+ while (event) {
+ this.emit(event.eventName, event.data, event.error);
+ if (
+ this._state !== BAILING &&
+ event.data &&
+ event.data._bail &&
+ (failureCount || event.error)
+ ) {
+ debug('run(): nonzero failure count & found bail flag');
+ // we need to let the events complete for this file, as the worker
+ // should run any cleanup hooks
+ this._state = BAILING;
+ }
+ event = events.shift();
+ }
+ if (this._state === BAILING) {
+ debug('run(): terminating pool due to "bail" flag');
+ this._state = BAILED;
+ await pool.terminate();
+ }
+ } catch (err) {
+ if (this._state === BAILED || this._state === ABORTING) {
+ debug(
+ 'run(): worker pool terminated with intent; skipping file %s',
+ file
+ );
+ } else {
+ // this is an uncaught exception
+ debug('run(): encountered uncaught exception: %O', err);
+ if (this.allowUncaught) {
+ // still have to clean up
+ this._state = ABORTING;
+ await pool.terminate(true);
+ }
+ throw err;
+ }
+ } finally {
+ debug('run(): done running file %s', file);
+ }
+ };
+ }
+
+ /**
+ * Listen on `Process.SIGINT`; terminate pool if caught.
+ * Returns the listener for later call to `process.removeListener()`.
+ * @param {BufferedWorkerPool} pool - Worker pool
+ * @returns {SigIntListener} Listener
+ */
+ _bindSigIntListener(pool) {
+ const sigIntListener = async () => {
+ debug('run(): caught a SIGINT');
+ this._state = ABORTING;
+
+ try {
+ debug('run(): force-terminating worker pool');
+ await pool.terminate(true);
+ } catch (err) {
+ console.error(
+ `Error while attempting to force-terminate worker pool: ${err}`
+ );
+ process.exitCode = 1;
+ } finally {
+ process.nextTick(() => {
+ debug('run(): imminent death');
+ this._state = ABORTED;
+ process.kill(process.pid, 'SIGINT');
+ });
+ }
+ };
+
+ process.once('SIGINT', sigIntListener);
+
+ return sigIntListener;
+ }
+
+ /**
+ * Runs Mocha tests by creating a worker-process pool, then delegating work to
+ * the worker processes.
+ *
+ * Each worker receives one file, and as workers become available, they take a
+ * file from the queue and run it. The worker execution is treated like
+ * an RPC--it returns a `Promise` containing serialized information about the
+ * run. The information is processed as it's received, and emitted to a
+ * {@link Reporter}, which is likely listening for these events.
+ *
+ * @param {Function} callback - Called with an exit code corresponding to
+ * number of test failures.
+ * @param {{files: string[], options: Options}} opts - Files to run and
+ * command-line options, respectively.
+ */
+ run(callback, {files, options} = {}) {
+ /**
+ * Listener on `Process.SIGINT` which tries to cleanly terminate the worker pool.
+ */
+ let sigIntListener;
+ // This function should _not_ return a `Promise`; its parent (`Runner#run`)
+ // returns this instance, so this should do the same. However, we want to make
+ // use of `async`/`await`, so we use this IIFE.
+
+ (async () => {
+ /**
+ * This is an interval that outputs stats about the worker pool every so often
+ */
+ let debugInterval;
+
+ /**
+ * @type {BufferedWorkerPool}
+ */
+ let pool;
+
+ try {
+ pool = BufferedWorkerPool.create({maxWorkers: options.jobs});
+
+ sigIntListener = this._bindSigIntListener(pool);
+
+ /* istanbul ignore next */
+ debugInterval = setInterval(
+ () => debugStats(pool),
+ DEBUG_STATS_INTERVAL
+ ).unref();
+
+ // this is set for uncaught exception handling in `Runner#uncaught`
+ // TODO: `Runner` should be using a state machine instead.
+ this.started = true;
+ this._state = RUNNING;
+
+ this.emit(EVENT_RUN_BEGIN);
+
+ const results = await allSettled(
+ files.map(this._createFileRunner(pool, options))
+ );
+
+ // note that pool may already be terminated due to --bail
+ await pool.terminate();
+
+ results
+ .filter(({status}) => status === 'rejected')
+ .forEach(({reason}) => {
+ if (this.allowUncaught) {
+ // yep, just the first one.
+ throw reason;
+ }
+ // "rejected" will correspond to uncaught exceptions.
+ // unlike the serial runner, the parallel runner can always recover.
+ this.uncaught(reason);
+ });
+
+ if (this._state === ABORTING) {
+ return;
+ }
+ this.emit(EVENT_RUN_END);
+ debug('run(): completing with failure count %d', this.failures);
+ callback(this.failures);
+ } catch (err) {
+ // this `nextTick` takes us out of the `Promise` scope, so the
+ // exception will not be caught and returned as a rejected `Promise`,
+ // which would lead to an `unhandledRejection` event.
+ process.nextTick(() => {
+ debug('run(): re-throwing uncaught exception');
+ throw err;
+ });
+ } finally {
+ clearInterval(debugInterval);
+ process.removeListener('SIGINT', sigIntListener);
+ }
+ })();
+ return this;
+ }
+ }
+
+ module.exports = ParallelBufferedRunner;
+
+ /**
+ * Listener function intended to be bound to `Process.SIGINT` event
+ * @callback SigIntListener
+ * @returns {Promise<void>}
+ */
+
+ /**
+ * A function accepting a test file path and returning the results of a test run
+ * @callback FileRunner
+ * @param {string} filename - File to run
+ * @returns {Promise<SerializedWorkerResult>}
+ */
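
The _state property above implements a small state machine: `states` maps each state to the set of states it may transition to, and the property setter rejects anything else. A standalone sketch of the same guard pattern (an editorial illustration; createMap is simplified here to a plain null-prototype object, assumed equivalent for this purpose):

// Editorial sketch of the transition guard used by ParallelBufferedRunner above.
const createMap = obj => Object.assign(Object.create(null), obj);

const states = createMap({
  IDLE: new Set(['RUNNING', 'ABORTING']),
  RUNNING: new Set(['COMPLETE', 'BAILING', 'ABORTING']),
  COMPLETE: new Set()
});

let state = 'IDLE';
const transition = newState => {
  if (!states[state].has(newState)) {
    throw new Error(`invalid state transition: ${state} => ${newState}`);
  }
  state = newState;
};

transition('RUNNING'); // ok: IDLE => RUNNING
transition('COMPLETE'); // ok: RUNNING => COMPLETE
// transition('RUNNING'); // would throw: COMPLETE permits no further transitions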
@@ -0,0 +1,133 @@
+ /**
+ * "Buffered" reporter used internally by a worker process when running in parallel mode.
+ * @module reporters/parallel-buffered
+ * @private
+ */
+
+ 'use strict';
+
+ /**
+ * Module dependencies.
+ */
+
+ const {
+ EVENT_SUITE_BEGIN,
+ EVENT_SUITE_END,
+ EVENT_TEST_FAIL,
+ EVENT_TEST_PASS,
+ EVENT_TEST_PENDING,
+ EVENT_TEST_BEGIN,
+ EVENT_TEST_END,
+ EVENT_TEST_RETRY,
+ EVENT_DELAY_BEGIN,
+ EVENT_DELAY_END,
+ EVENT_HOOK_BEGIN,
+ EVENT_HOOK_END,
+ EVENT_RUN_END
+ } = require('../../runner').constants;
+ const {SerializableEvent, SerializableWorkerResult} = require('../serializer');
+ const debug = require('debug')('mocha:reporters:buffered');
+ const Base = require('../../reporters/base');
+
+ /**
+ * List of events to listen to; these will be buffered and sent
+ * when `Mocha#run` is complete (via {@link ParallelBuffered#done}).
+ */
+ const EVENT_NAMES = [
+ EVENT_SUITE_BEGIN,
+ EVENT_SUITE_END,
+ EVENT_TEST_BEGIN,
+ EVENT_TEST_PENDING,
+ EVENT_TEST_FAIL,
+ EVENT_TEST_PASS,
+ EVENT_TEST_RETRY,
+ EVENT_TEST_END,
+ EVENT_HOOK_BEGIN,
+ EVENT_HOOK_END
+ ];
+
+ /**
+ * Like {@link EVENT_NAMES}, except we expect these events to only be emitted
+ * by the `Runner` once.
+ */
+ const ONCE_EVENT_NAMES = [EVENT_DELAY_BEGIN, EVENT_DELAY_END];
+
+ /**
+ * The `ParallelBuffered` reporter is for use by concurrent runs. Instead of outputting
+ * to `STDOUT`, etc., it retains a list of events it receives and hands these
+ * off to the callback passed into {@link Mocha#run}. That callback will then
+ * return the data to the main process.
+ * @private
+ */
+ class ParallelBuffered extends Base {
+ /**
+ * Listens for {@link Runner} events and retains them in an `events` instance prop.
+ * @param {Runner} runner
+ */
+ constructor(runner, opts) {
+ super(runner, opts);
+
+ /**
+ * Retained list of events emitted from the {@link Runner} instance.
+ * @type {BufferedEvent[]}
+ * @memberOf ParallelBuffered
+ */
+ const events = (this.events = []);
+
+ /**
+ * mapping of event names to listener functions we've created,
+ * so we can cleanly _remove_ them from the runner once it's completed.
+ */
+ const listeners = new Map();
+
+ /**
+ * Creates a listener for event `eventName` and adds it to the `listeners`
+ * map. This is a defensive measure, so that we don't a) leak memory or b)
+ * remove _other_ listeners that may not be associated with this reporter.
+ * @param {string} eventName - Event name
+ */
+ const createListener = eventName =>
+ listeners
+ .set(eventName, (runnable, err) => {
+ events.push(SerializableEvent.create(eventName, runnable, err));
+ })
+ .get(eventName);
+
+ EVENT_NAMES.forEach(evt => {
+ runner.on(evt, createListener(evt));
+ });
+ ONCE_EVENT_NAMES.forEach(evt => {
+ runner.once(evt, createListener(evt));
+ });
+
+ runner.once(EVENT_RUN_END, () => {
+ debug('received EVENT_RUN_END');
+ listeners.forEach((listener, evt) => {
+ runner.removeListener(evt, listener);
+ listeners.delete(evt);
+ });
+ });
+ }
+
+ /**
+ * Calls the {@link Mocha#run} callback (`callback`) with the test failure
+ * count and the array of {@link BufferedEvent} objects. Resets the array.
+ * @param {number} failures - Number of failed tests
+ * @param {Function} callback - The callback passed to {@link Mocha#run}.
+ */
+ done(failures, callback) {
+ callback(SerializableWorkerResult.create(this.events, failures));
+ this.events = []; // defensive
+ }
+ }
+
+ /**
+ * Serializable event data from a `Runner`. Keys of the `data` property
+ * beginning with `__` will be converted into a function which returns the value
+ * upon deserialization.
+ * @typedef {Object} BufferedEvent
+ * @property {string} eventName - Event name
+ * @property {object} data - Event parameters
+ */
+
+ module.exports = ParallelBuffered;
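
To close the loop, a sketch of how a worker process might wire this reporter up. The worker module itself is not part of this excerpt, so the setup below is an assumption based on the done() contract documented above: with this reporter installed, the Mocha#run callback receives the SerializableWorkerResult rather than a bare failure count.

// Editorial sketch only: the actual worker module is not shown in this diff,
// and the reporter's require path here is assumed for illustration.
const Mocha = require('mocha');
const ParallelBuffered = require('./reporters/parallel-buffered'); // path assumed

function runFileInWorker(filepath, options) {
  return new Promise(resolve => {
    const mocha = new Mocha(options);
    mocha.reporter(ParallelBuffered); // Mocha#reporter accepts a constructor
    mocha.addFile(filepath);
    // `result` arrives via ParallelBuffered#done: the failure count plus the
    // buffered events, ready to be serialized back to the main process
    mocha.run(result => resolve(result));
  });
}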