rascal 18.0.0 → 18.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,10 @@
  # Change Log

+ ## 18.0.1
+
+ - Removed console.log when the channel pool destroyed a channel
+ - Add streams example
+
  ## 18.0.0

  - Fixes https://github.com/onebeyond/rascal/issues/227 by requiring special characters to be URL encoded.
package/README.md CHANGED
@@ -753,6 +753,62 @@ To define a queue with extensions such as `x-queue-type` add arguments to the op
 
  Refer to the [amqplib](https://www.squaremobius.net/amqp.node/channel_api.html) documentation for further queue options.
 
+ #### streams
+
+ Rascal supports [RabbitMQ Streams](https://www.rabbitmq.com/docs/streams) via the `x-queue-type` argument, e.g.
+
+ ```json
+ {
+   "queues": {
+     "q1": {
+       "options": {
+         "arguments": {
+           "x-queue-type": "stream"
+         }
+       }
+     }
+   }
+ }
+ ```
+
+ The [Stream Plugin](https://www.rabbitmq.com/docs/stream) and associated binary protocol extension are not supported.
+
+ Streams are **not** a replacement for regular messaging - instead they are best suited to cases where you can tolerate occasional message loss and need higher throughput, such as sampling web-based analytics.
+
+ When working with streams you need to think carefully about [data retention](https://www.rabbitmq.com/docs/streams#retention). Unless you specify retention configuration, messages will never be deleted and eventually you will run out of space. Conversely, if you automatically delete messages based on queue size or age, they may be lost without ever being read.
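A retention policy of this kind can be sketched through the same `options.arguments` block shown above; `x-max-age` and `x-max-length-bytes` are the standard RabbitMQ stream retention arguments, and the values below are arbitrary illustrations rather than recommendations:

```json
{
  "queues": {
    "q1": {
      "options": {
        "arguments": {
          "x-queue-type": "stream",
          "x-max-age": "7D",
          "x-max-length-bytes": 10485760
        }
      }
    }
  }
}
```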
+
+ You also need to think about how you will [track the consumer offset](https://www.rabbitmq.com/blog/2021/09/13/rabbitmq-streams-offset-tracking). Typically you will need to store this in a database after successfully processing the message and use it to tell the broker where to resume from after your application restarts. For example...
+
+ ```js
+ const initialOffset = (await loadOffset('/my-queue')) || 'first';
+
+ const overrides = {
+   options: {
+     arguments: {
+       'x-stream-offset': initialOffset
+     }
+   }
+ };
+
+ const subscription = await broker.subscribe('/my-queue', overrides);
+
+ subscription.on('message', async (message, content, ackOrNack) => {
+   const currentOffset = message.properties.headers['x-stream-offset'];
+   try {
+     await handleMessage(content);
+     await updateOffset('/my-queue', currentOffset);
+   } catch (err) {
+     await handleError('/my-queue', currentOffset, err);
+   } finally {
+     ackOrNack(); // Streams do not support nack so do not pass the error argument
+   }
+ });
+ ```
+
+ However, if your application is offline for too long, and messages are still being published to the stream, it may not be able to resume from where it left off, since those messages may have been deleted. Furthermore, if your application consumes messages concurrently, you need to think about how you will recover should one fail. If you naively override the previously saved offset, you may replace a higher/later offset with a lower/older one, causing your application to restart from the wrong point. Finally, you also need to decide what to do if the message cannot be processed. You cannot simply replay the message since you are working with a stream, rather than a classic queue. You could cancel the subscription and resume from the current offset, but this will lead to duplicates if you have been consuming messages concurrently. Alternatively you could republish the failures to a dead letter queue and process them separately.
+
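One way to guard against regressing the saved offset when consuming concurrently is to make the update monotonic, i.e. only persist an offset that is higher than the one already stored. A rough sketch, reusing the hypothetical `loadOffset`/`updateOffset` helpers from the example above and assuming a hypothetical `saveOffset` function that writes to your database:

```js
// Hypothetical updateOffset: only ever moves the stored offset forwards, so a
// slow concurrent handler cannot overwrite a later offset with an earlier one.
// In a real database this read-compare-write should be performed atomically
// (e.g. with a conditional update).
async function updateOffset(queue, offset) {
  const stored = await loadOffset(queue); // assumed to return undefined when nothing is stored yet
  if (stored !== undefined && stored >= offset) return; // ignore stale (older) offsets
  await saveOffset(queue, offset);
}
```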
+ For the above reasons, we only recommend considering streams when you genuinely need the extra throughput.
+
  #### bindings
 
  You can bind exchanges to exchanges, or exchanges to queues.
@@ -0,0 +1,13 @@
+ {
+   "name": "streams",
+   "version": "1.0.0",
+   "lockfileVersion": 1,
+   "requires": true,
+   "dependencies": {
+     "random-readable": {
+       "version": "1.0.1",
+       "resolved": "https://registry.npmjs.org/random-readable/-/random-readable-1.0.1.tgz",
+       "integrity": "sha512-Y++VltLA4yRsvFDAPbODh9hMw7cfkng+c/S+44ob6xGt0itLr8s6VhANl7kY7igEv3igPgzdc+T8EhBjQWjd9g=="
+     }
+   }
+ }
@@ -0,0 +1,14 @@
+ {
+   "name": "streams",
+   "version": "1.0.0",
+   "description": "",
+   "main": "index.js",
+   "scripts": {
+     "test": "echo \"Error: no test specified\" && exit 1"
+   },
+   "author": "",
+   "license": "ISC",
+   "dependencies": {
+     "random-readable": "^1.0.1"
+   }
+ }
@@ -0,0 +1,37 @@
+ {
+   "$schema": "../../lib/config/schema.json",
+   "vhosts": {
+     "/": {
+       "publicationChannelPools": {
+         "regularPool": {
+           "max": 10,
+           "min": 10,
+           "evictionRunIntervalMillis": 1000,
+           "idleTimeoutMillis": 5000,
+           "autostart": true
+         }
+       },
+       "connection": {
+         "socketOptions": {
+           "timeout": 1000
+         }
+       },
+       "queues": {
+         "demo_stream": {
+           "options": {
+             "arguments": {
+               "x-queue-type": "stream",
+               "x-max-length-bytes": 10485760
+             }
+           }
+         }
+       },
+       "publications": {
+         "demo_pub": {
+           "queue": "demo_stream",
+           "confirm": false
+         }
+       }
+     }
+   }
+ }
@@ -0,0 +1,38 @@
+ const Rascal = require('../..');
+ const config = require('./publisher-config');
+ const random = require('random-readable');
+ const max = parseInt(process.argv[2], 10) || Infinity;
+
+ Rascal.Broker.create(Rascal.withDefaultConfig(config), (err, broker) => {
+   if (err) throw err;
+
+   broker.on('error', console.error);
+
+   let count = 0;
+
+   const stream = random
+     .createRandomStream()
+     .on('error', console.error)
+     .on('data', (data) => {
+       broker.publish('demo_pub', data, (err, publication) => {
+         if (err) throw err;
+         else if (count >= max) stream.destroy();
+         else count++;
+         publication.on('error', console.error);
+       });
+     })
+     .on('close', () => {
+       console.log(`Published ${count} messages`);
+       broker.shutdown();
+     });
+
+   broker.on('busy', (details) => {
+     console.log(Date.now(), `Pausing vhost: ${details.vhost} (mode: ${details.mode}, queue: ${details.queue}, size: ${details.size}, borrowed: ${details.borrowed}, available: ${details.available})`);
+     stream.pause();
+   });
+
+   broker.on('ready', (details) => {
+     console.log(Date.now(), `Resuming vhost: ${details.vhost} (mode: ${details.mode}, queue: ${details.queue}, size: ${details.size}, borrowed: ${details.borrowed}, available: ${details.available})`);
+     stream.resume();
+   });
+ });
@@ -0,0 +1,28 @@
+ {
+   "$schema": "../../lib/config/schema.json",
+   "vhosts": {
+     "/": {
+       "connection": {
+         "socketOptions": {
+           "timeout": 1000
+         }
+       },
+       "queues": {
+         "demo_stream": {
+           "options": {
+             "arguments": {
+               "x-queue-type": "stream",
+               "x-max-length-bytes": 10485760
+             }
+           }
+         }
+       },
+       "subscriptions": {
+         "demo_sub": {
+           "queue": "demo_stream",
+           "prefetch": 250
+         }
+       }
+     }
+   }
+ }
@@ -0,0 +1,25 @@
+ const Rascal = require('../..');
+ const config = require('./subscriber-config');
+ const offset = parseInt(process.argv[2], 10) || 'first';
+
+ Rascal.Broker.create(Rascal.withDefaultConfig(config), (err, broker) => {
+   if (err) throw err;
+
+   broker.on('error', console.error);
+
+   const overrides = {
+     options: {
+       arguments: {
+         'x-stream-offset': offset
+       }
+     }
+   };
+
+   broker.subscribe('demo_sub', overrides, (err, subscription) => {
+     if (err) throw err;
+     subscription.on('message', (message, content, ackOrNack) => {
+       console.log(`Received message: ${message.properties.headers['x-stream-offset']}`);
+       ackOrNack();
+     });
+   });
+ });
package/lib/amqp/Vhost.js CHANGED
@@ -201,8 +201,7 @@ function Vhost(vhostConfig, components) {
  createChannelWhenInitialised(options.confirm, (err, channel) => {
  if (err) return deferRejection(reject, err);
  if (!channel) return deferRejection(reject, new Error('Vhost is shutting down'));
- const destroyChannel = _.once((err) => {
-   console.log('Destroying channel', err);
+ const destroyChannel = _.once(() => {
  debug('Destroying %s channel: %s for vhost: %s due to error or close event', mode, channel._rascal_id, vhostConfig.name);
  channel._rascal_closed = true;
  if (pool.isBorrowedResource(channel)) {
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "rascal",
- "version": "18.0.0",
+ "version": "18.0.1",
  "description": "A config driven wrapper for amqplib supporting multi-host connections, automatic error recovery, redelivery flood protection, transparent encryption / decryption, channel pooling and publication timeouts",
  "main": "index.js",
  "dependencies": {