@promistream/simple-queue 0.1.0 → 0.1.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3)
  1. package/index.js +53 -80
  2. package/package.json +10 -3
  3. package/test.js +63 -0
package/index.js CHANGED
@@ -1,94 +1,67 @@
  "use strict";

- // DOCS: A basic in-memory queue, with no persistence capabilities. Typically useful for implementing recursive asynchronous operations as a stream, such as recursively walking a directory tree.
- // FIXME: Add to spec a suggestion to use the `foo.stream` pattern for things that return more than just a stream?
+ const pushBuffer = require("push-buffer");
+ const simpleSource = require("@promistream/simple-source");
+ const EndOfStream = require("@promistream/end-of-stream");
+ const makeDebug = require("debug-instanced")("promistream:simple-queue");

- const asExpression = require("as-expression");
- const matchValue = require("match-value");
- const errorChain = require("error-chain");
+ const { validateArguments } = require("@validatem/core");
+ const arrayOf = require("@validatem/array-of");
+ const isBoolean = require("@validatem/is-boolean");
+ const anything = require("@validatem/anything");
+ const defaultTo = require("@validatem/default-to");

- const EndOfStream = require("@promistream/end-of-stream");
- const Aborted = require("@promistream/aborted");
+ let currentDebugID = 0;

- module.exports = function simpleQueue(initialItems) {
- 	// FIXME: Validatem
- 	let items = initialItems.slice();
+ module.exports = function createSimpleQueue(_initialItems, _options) {
+ 	let [ initialItems, { autoEnd }] = validateArguments(arguments, {
+ 		initialItems: [ defaultTo([]), arrayOf(anything) ],
+ 		options: [ defaultTo({}), {
+ 			autoEnd: [ defaultTo(true), isBoolean ]
+ 		}]
+ 	});

- 	let peekPointer = 0;
- 	let isEnded = false;
- 	let errorReason;
+ 	// TODO: Add initialItems to instance description, if there's a sufficiently cheap way to serialize that
+ 	let debug = makeDebug(`ID ${currentDebugID++}`);

- 	return {
- 		push: function addItemToQueue(item) {
- 			// TODO: Throw an error if stream has already ended?
- 			items.push(item);
- 		},
- 		stream: {
- 			_promistreamVersion: 0,
- 			_promistreamIsSource: true,
- 			description: `simple queue`,
- 			peek: async function peekValue_simpleQueue() {
- 				// NOTE: 0 items in the queue does *not* mean that the stream has ended, here! This may be a temporary state, and so only translates into an ended state if it is still true when a read is attempted, as per the spec's defined behaviour for parallelized streams.
- 				if (errorReason != null || isEnded === true) {
- 					return true;
- 				} else if (items.length > 0 && peekPointer < items.length - 1) {
- 					peekPointer += 1;
- 					return true;
- 				} else {
- 					return false;
- 				}
- 			},
- 			read: async function produceValue_simpleQueue() {
- 				let action = asExpression(() => {
- 					if (peekPointer > 0) {
- 						return "readValue";
- 					} else if (errorReason != null) {
- 						return "readError";
- 					} else if (items.length === 0 || isEnded) {
- 						return "endOfStream";
- 					} else {
- 						return "readValue";
- 					}
- 				});
+ 	let buffer = pushBuffer({ mode: "push" });
+ 	let isEnded = false;

- 				return matchValue(action, {
- 					readValue: () => {
- 						// NOTE: As per the spec, each peek corresponds to a later read, but reads are allowed to happen without preceding peeks. Therefore, we should only reduce the peekPointer when it's actually been incremented to begin with.
- 						if (peekPointer > 0) {
- 							peekPointer -= 1;
- 						}
+ 	debug(`pushing ${initialItems.length} initial items`);
+ 	for (let item of initialItems) {
+ 		buffer.push(item);
+ 	}

- 						return items.shift();
- 					},
- 					readError: () => {
- 						// FIXME: Abstract out this logic, it's reusable across source streams
- 						if (errorReason === true) {
- 							throw new Aborted("Stream was aborted");
- 						} else if (errorReason instanceof Error) {
- 							throw new errorChain.chain(errorReason, Aborted, `Stream was aborted due to error: ${errorReason.message}`);
- 						}
- 					},
- 					endOfStream: () => {
- 						// This locks the queue into an 'ended' state for spec compliance, even if some misbehaving code pushes an item after the pipeline has already been torn down
- 						isEnded = true;
+ 	let stream = simpleSource(() => {
+ 		if (isEnded === false && buffer.countLane().values > 0) {
+ 			debug(`values available; requesting one`);
+ 			return buffer.request();
+ 		} else if (autoEnd === false) {
+ 			// We will wait for an external end signal, and never end the stream ourselves
+ 			debug(`no values available, but autoEnd is disabled; waiting for value...`);
+ 			return buffer.request();
+ 		} else {
+ 			// Lock into ended state; this stream will never produce values again, even if values get pushed later
+ 			debug(`stream has ended`);
+ 			isEnded = true;
+ 			throw new EndOfStream();
+ 		}
+ 	});

- 						throw new EndOfStream;
- 					}
- 				});
- 			},
- 			abort: async function abort_simpleQueue(reason, _source) {
- 				// FIXME: Abstract out this logic, it's reusable across source streams
- 				if (errorReason == null) {
- 					if (reason === true || reason instanceof Error) {
- 						errorReason = reason;
- 					} else {
- 						throw new Error("You must specify a reason (either `true` or an Error object) when aborting a stream");
- 					}
- 				} else {
- 					// FIXME: Require this behaviour in the spec? Or is there a composability-related reason to permit/require quietly ignoring this, to make it idempotent?
- 					throw new Error(`The stream is already aborted`);
- 				}
+ 	return {
+ 		stream: stream,
+ 		push: async function (item) {
+ 			if (isEnded) {
+ 				throw new Error(`Queue has already ended`);
+ 			} else {
+ 				debug(`pushing item to queue`, item);
+ 				return buffer.push(item);
  			}
+ 		},
+ 		end: async function () {
+ 			debug(`marking queue as ended`);
+ 			isEnded = true;
+ 			return buffer.pushError(new EndOfStream("Queue was ended"));
  		}
  	};
  };
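
For orientation, here is a minimal usage sketch of the reworked 0.1.2 API (not part of the published package); it mirrors the pipeline shape used in test.js and relies on @promistream/pipe and @promistream/collect from this package's devDependencies:

"use strict";

const simpleQueue = require("@promistream/simple-queue");
const pipe = require("@promistream/pipe");
const collect = require("@promistream/collect");

(async () => {
	// With autoEnd disabled, the stream keeps waiting for items until end() is called explicitly.
	let queue = simpleQueue([ 1, 2, 3 ], { autoEnd: false });

	queue.push(4); // push must happen before end(), which locks the queue
	queue.end();

	let result = await pipe([
		queue.stream,
		collect()
	]).read();

	console.log(result); // Expected: [ 1, 2, 3, 4 ]
})();

With the default autoEnd: true, the explicit end() call is unnecessary: the stream ends on its own as soon as a read finds the queue empty.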
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  	"name": "@promistream/simple-queue",
- 	"version": "0.1.0",
+ 	"version": "0.1.2",
  	"main": "index.js",
  	"repository": "http://git.cryto.net/promistream/simple-queue.git",
  	"author": "Sven Slootweg <admin@cryto.net>",
@@ -8,13 +8,20 @@
  	"dependencies": {
  		"@promistream/aborted": "^0.1.2",
  		"@promistream/end-of-stream": "^0.1.0",
+ 		"@validatem/anything": "^0.1.0",
+ 		"@validatem/array-of": "^0.1.3",
+ 		"@validatem/core": "^0.6.0",
+ 		"@validatem/default-to": "^0.1.0",
+ 		"@validatem/is-boolean": "^0.1.1",
  		"as-expression": "^1.0.0",
+ 		"debug-instanced": "^1.0.0",
  		"error-chain": "^0.1.3",
- 		"match-value": "^1.1.0"
+ 		"match-value": "^1.1.0",
+ 		"push-buffer": "^1.0.0"
  	},
  	"devDependencies": {
  		"@promistream/collect": "^0.1.1",
- 		"@promistream/parallelize": "^0.1.0",
+ 		"@promistream/parallelize": "^0.1.3",
  		"@promistream/pipe": "^0.1.6",
  		"@promistream/spy": "^0.1.0",
  		"bluebird": "^3.7.2"
package/test.js ADDED
@@ -0,0 +1,63 @@
+ "use strict";
+
+ const test = require("node:test");
+ const assert = require("node:assert");
+
+ const simpleQueue = require("./");
+
+ const pipe = require("@promistream/pipe");
+ const spy = require("@promistream/spy");
+ const parallelize = require("@promistream/parallelize");
+ const collect = require("@promistream/collect");
+
+ test("basic functioning", async () => {
+ 	let queue = simpleQueue([ 0, 1, 2, 3, 4, 5, 6, 7 ]);
+
+ 	let result = await pipe([
+ 		queue.stream,
+ 		spy((value) => {
+ 			if (value < 5) {
+ 				queue.push(value + 8);
+ 			}
+ 		}),
+ 		collect()
+ 	]).read();
+
+ 	assert.deepStrictEqual(result, [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 ]);
+ });
+
+ test("unbounded parallelism", async () => {
+ 	let queue = simpleQueue([ 0, 1, 2, 3, 4, 5, 6, 7 ]);
+
+ 	let result = await pipe([
+ 		queue.stream,
+ 		spy((value) => {
+ 			if (value < 5) {
+ 				queue.push(value + 8);
+ 			}
+ 		}),
+ 		parallelize(Infinity),
+ 		collect()
+ 	]).read();
+
+ 	assert.deepStrictEqual(result, [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 ]);
+ });
+
+ test("autoEnd = false", async () => {
+ 	let queue = simpleQueue([ 0, 1, 2, 3, 4, 5, 6, 7 ], { autoEnd: false });
+
+ 	setTimeout(() => queue.end(), 250); // Much longer than it would normally take to complete
+
+ 	let result = await pipe([
+ 		queue.stream,
+ 		spy((value) => {
+ 			if (value < 5) {
+ 				queue.push(value + 8);
+ 			}
+ 		}),
+ 		parallelize(300),
+ 		collect()
+ 	]).read();
+
+ 	assert.deepStrictEqual(result, [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 ]);
+ });
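
The DOCS comment removed from index.js names recursively walking a directory tree as a typical use case. Below is a hypothetical sketch of that pattern (not shipped with the package), reusing the spy-based feedback loop from the tests above; the walkDirectory name and the synchronous fs calls are illustrative assumptions chosen for brevity:

"use strict";

const fs = require("fs");
const path = require("path");

const simpleQueue = require("@promistream/simple-queue");
const pipe = require("@promistream/pipe");
const spy = require("@promistream/spy");
const collect = require("@promistream/collect");

async function walkDirectory(rootPath) {
	let queue = simpleQueue([ rootPath ]);

	// Every directory that passes through the stream pushes its children back
	// onto the queue, so the walk continues until no entries are left; with the
	// default autoEnd, the stream then ends on its own.
	return pipe([
		queue.stream,
		spy((entryPath) => {
			if (fs.statSync(entryPath).isDirectory()) {
				for (let child of fs.readdirSync(entryPath)) {
					queue.push(path.join(entryPath, child));
				}
			}
		}),
		collect()
	]).read();
}

walkDirectory(".").then((paths) => console.log(paths));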