mqtt-core 0.0.1.ci.release → 0.9.0.rc1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 03cdb9467d4797a91a9751dd5f0421d4f0a231a3084314dbbf9b58d34812c7a8
4
- data.tar.gz: 17800776d5fc4fd0698758bbcc79fc29d64f65a7c1d7d3ce91265faf55646c2a
3
+ metadata.gz: 5d3ac3a37e9bd2c03f3461a9121fdb80392c4c30800185eaead46f082b6315a1
4
+ data.tar.gz: ab3f1dac57c2ec1bfa6903c07aad6f1b1ceaddabaac07dcccd30e8ddc76c051f
5
5
  SHA512:
6
- metadata.gz: eedd90f6fe41a5e72ea11a990507c168b970e203af0a30d079ca70a76dc71b1f8bdac6c915b84b7de4abb7097ba99593683938c9e23e5f14a869d4849950ea8b
7
- data.tar.gz: ced170d3f87f4fee052ddc4e5e9f282b3d3be257c0eb2c2b50278e3577e087d80f9fe1b7eb1e740474224f1244ca2b7e2f14f35c8c88a653dd80f9498b27fd57
6
+ metadata.gz: 5946a64c4cf7a87ec00f9c1cc7a54caf27d401293fd07f5e95f2d6f29dae474cc20ca3db462c05a2f8123deebbacb9918b581a03df02fdc15c76e6771dc87003
7
+ data.tar.gz: c6a1db949580d747ccf4c463eaf4e7afc417cff883c5d078f3fcf2c0fe50fbf8dcca1bf862787b350d61fd9537598c25c6ae86d5b386aa370ae2bd53c3f2a7ed
@@ -9,12 +9,14 @@ module MQTT
9
9
  #
10
10
  # Method variants
11
11
  #
12
- # - methods with aliases suffixed `messages` yield deconstructed topic, payload, and attributes
12
+ # - methods with aliases suffixed `messages` yield deconstructed topic, payload, and version-specific attributes
13
+ # * v3 - :qos, :retain
14
+ # * v5 - :qos, :retain, :message_expiry_interval, :response_topic, :correlation_data, :content_type,
15
+ # :user_properties
13
16
  # - methods suffixed with `packets` yield raw `PUBLISH` packets
14
17
  # - methods prefixed with `async` perform enumeration in a new thread
15
- # - methods suffixed with bang `!` ensure {#unsubscribe}
16
18
  # - methods prefixed with `lazy` return lazy enumerators for advanced chaining
17
- #
19
+ # - methods suffixed with bang `!` ensure {#unsubscribe} when enumeration completes.
18
20
  class EnumerableSubscription < Subscription
19
21
  include Enumerable
20
22
 
@@ -28,13 +30,25 @@ module MQTT
28
30
  # @yieldparam [String] topic the message topic.
29
31
  # @yieldparam [String] payload the message payload.
30
32
  # @yieldparam [Hash<Symbol>] attributes additional `PUBLISH` packet attributes.
33
+ #
34
+ # Optional attributes may not be present. Use default values if necessary.
31
35
  # @yieldreturn [$0]
32
36
 
37
+ # @!macro [new] enum_messages
38
+ #
39
+ # Block can throw :unsubscribe to close the subscription and enumeration.
40
+ # @!macro yield_messages(void)
41
+
33
42
  # @!macro [new] yield_packets
34
43
  # @yield [packet]
35
44
  # @yieldparam [Packet] packet a `PUBLISH` packet.
36
45
  # @yieldreturn [$0]
37
46
 
47
+ # @!macro [new] enum_packets
48
+ #
49
+ # Block can throw :unsubscribe to close the subscription and enumeration.
50
+ # @!macro yield_packets(void)
51
+
38
52
  # @!macro [new] enum_return
39
53
  # @return [void] when block given
40
54
  # @return [Enumerator] an enumerator when no block given.
@@ -44,33 +58,27 @@ module MQTT
44
58
  # @return [Enumerator::Lazy] a lazy enumerator when no block given.
45
59
 
46
60
  # @!macro [new] async_return
47
- # @return [self, ConcurrentMonitor::Task]
48
- # a pair containing self and the task that is iterating over the messages.
49
-
50
- # @!macro [new] qos_note
51
- # @note QoS 1/2 packets are marked as completely handled in the session store when the given block completes.
52
- # If no block is given, completion is marked *before* the packet is returned.
61
+ # @return [self, ConcurrentMonitor::Task...]
62
+ # self and the task iterating over the messages.
53
63
 
54
64
  # Get one packet, blocking until available
55
65
  #
56
- # @!macro qos_note
57
66
  # @!macro yield_packets(Object)
58
67
  # @return [Packet] a `PUBLISH` packet when no block given
59
68
  # @return [Object] the block result when block given
60
69
  # @return [nil] when unsubscribed or disconnected
61
70
  def get_packet(&)
62
- handle(handler.dequeue, &)
71
+ handle(dequeue, &)
63
72
  end
64
73
 
65
74
  # Get one message, blocking until available
66
75
  #
67
- # @!macro qos_note
68
76
  # @!macro yield_messages(Object)
69
77
  # @return [String, String, Hash<Symbol>] topic, payload, and attributes when no block given
70
78
  # @return [Object] the block result when block given
71
79
  # @return [nil] when unsubscribed or disconnected
72
80
  def get(&)
73
- get_packet { |pkt| pkt.deconstruct_message(&) }
81
+ get_packet { |pkt| pkt&.deconstruct_message(&) }
74
82
  end
75
83
  alias get_message get
76
84
 
@@ -83,7 +91,7 @@ module MQTT
83
91
  get_packet do |packet|
84
92
  raise StopIteration unless packet
85
93
 
86
- block_given? ? yield(packet) : packet
94
+ (block_given? ? yield(packet) : packet)
87
95
  end
88
96
  end
89
97
 
@@ -98,32 +106,32 @@ module MQTT
98
106
  alias read_message read
99
107
 
100
108
  # Enumerate packets
101
- # @!macro yield_packets(void)
109
+ # @!macro enum_packets
102
110
  # @!macro enum_return
103
111
  def each_packet(&)
104
112
  return enum_for(__method__) unless block_given?
105
113
 
106
- loop { read_packet(&) }
114
+ loop { read_packet { |pkt| catch_unsubscribe(pkt, &) } }
107
115
  end
108
116
 
109
117
  # Enumerate packets, ensuring {#unsubscribe}
110
- # @!macro yield_packets(void)
118
+ # @!macro enum_packets
111
119
  # @!macro enum_return
112
120
  def each_packet!(&) = enum_for!(__method__, &)
113
121
 
114
122
  # Enumerate messages
115
- # @!macro yield_messages(void)
123
+ # @!macro enum_messages
116
124
  # @!macro enum_return
117
- def each
125
+ def each(&)
118
126
  return enum_for(__method__) unless block_given?
119
127
 
120
- each_packet { |pkt| yield(*pkt.deconstruct_message) }
128
+ each_packet { |pkt| pkt.deconstruct_message(&) }
121
129
  end
122
130
 
123
131
  alias each_message each
124
132
 
125
133
  # Enumerate messages, ensuring {#unsubscribe}
126
- # @!macro yield_messages(void)
134
+ # @!macro enum_messages
127
135
  # @!macro enum_return
128
136
  def each!(&) = enum_for!(__method__, &)
129
137
  alias each_message! each!
@@ -148,46 +156,53 @@ module MQTT
148
156
 
149
157
  # Return a lazy packet enumerator for advanced chaining
150
158
  # @return [Enumerator::Lazy<Packet>] lazy enumerator yielding PUBLISH packets
159
+ # @see each_packet
151
160
  def lazy_packets
152
161
  each_packet.lazy
153
162
  end
154
163
 
155
164
  # Return a lazy packet enumerator with auto-unsubscribe
156
165
  # @return [Enumerator::Lazy<Packet>] lazy enumerator yielding PUBLISH packets
166
+ # @see each_packet!
157
167
  def lazy_packets!
158
168
  each_packet!.lazy
159
169
  end
160
170
 
161
171
  # Enumerate messages in a new thread
162
- # @overload async(&)
163
- # @!macro yield_messages(void)
172
+ # @overload async(via: client, **via_opts, &)
173
+ # @param via [:async] used to start a new task
174
+ # @param via_opts [Hash<Symbol>] passed to `via.async`
175
+ # @!macro enum_messages
164
176
  # @!macro async_return
165
177
  # @see each
166
- def async(method = :each, &)
178
+ def async(method = :each, via: client, **via_opts, &)
167
179
  raise ArgumentError, 'block is required for async enumeration' unless block_given?
168
180
 
169
- [self, client.async("sub.#{method}") { send(method, &) }]
181
+ [self, via.async(**via_opts) { send(method, &) }]
170
182
  end
171
183
  alias async_messages async
172
184
 
173
185
  # Enumerate messages in a new thread, ensuring {#unsubscribe}
174
- # @!macro yield_messages(void)
186
+ # @!macro enum_messages
175
187
  # @!macro async_return
176
188
  # @see each!
177
- def async!(&) = async(:each!, &)
189
+ # @see async
190
+ def async!(**, &) = async(:each!, **, &)
178
191
  alias async_messages! async!
179
192
 
180
193
  # Enumerate packets in a new thread
181
- # @!macro yield_packets(void)
194
+ # @!macro enum_packets
182
195
  # @!macro async_return
183
196
  # @see each_packet
184
- def async_packets(&) = async(:each_packet, &)
197
+ # @see async
198
+ def async_packets(**, &) = async(:each_packet, **, &)
185
199
 
186
200
  # Enumerate packets in a new thread, ensuring {#unsubscribe}
187
- # @!macro yield_packets(void)
201
+ # @!macro enum_packets
188
202
  # @!macro async_return
189
203
  # @see each_packet!
190
- def async_packets!(&) = async(:each_packet!, &)
204
+ # @see async
205
+ def async_packets!(**, &) = async(:each_packet!, **, &)
191
206
 
192
207
  # Delegates Enumerable methods ending in `!` to {#each!}, ensuring {#unsubscribe}
193
208
  #
@@ -209,6 +224,20 @@ module MQTT
209
224
 
210
225
  private
211
226
 
227
+ def dequeue
228
+ handler.dequeue.tap do |pkt|
229
+ # propagate the close signal to potentially multiple readers.
230
+ handler.enqueue(pkt) if !pkt || pkt.is_a?(StandardError)
231
+
232
+ raise pkt if pkt.is_a?(StandardError)
233
+ end
234
+ end
235
+
236
+ def catch_unsubscribe(packet, &)
237
+ # Yield the block, returning immediately unless :unsubscribe is thrown
238
+ catch(:unsubscribe) { return yield packet }.tap { unsubscribe }
239
+ end
240
+
212
241
  def respond_to_missing?(method, include_private = false)
213
242
  (method.end_with?('!') && Enumerable.public_instance_methods.include?(method[..-2].to_sym)) || super
214
243
  end
@@ -9,8 +9,12 @@ module MQTT
9
9
  module Core
10
10
  class Client
11
11
  # A Session Store that holds packets in the filesystem.
12
+ #
13
+ # Persists outbound QoS 1/2 packets for retry across process restarts.
14
+ # QoS2 inbound deduplication state (packet ids awaiting PUBREL) is also persisted.
12
15
  class FilesystemSessionStore < Qos2SessionStore
13
16
  attr_reader :client_dir, :base_dir, :session_expiry_file
17
+ attr_accessor :disconnect_expiry_interval
14
18
 
15
19
  # @param [String] base_dir the base directory to store session files in
16
20
  # @param [String|nil] client_id
@@ -19,16 +23,16 @@ module MQTT
19
23
  # zero is not permitted, but nil represents never expire (server may negotiate a lower value)
20
24
  def initialize(client_id:, expiry_interval:, base_dir: Dir.mktmpdir('mqtt'))
21
25
  @base_dir = Pathname.new(base_dir)
22
- @client_dir = (base_dir + client_id)
26
+ @client_dir = (@base_dir + client_id)
27
+ @disconnect_expiry_interval = nil # Default: don't change expiry on disconnect
23
28
  super(client_id:, expiry_interval:)
24
29
 
25
- @session_expiry_file = (base_dir + "#{client_id}.expiry")
26
- cleanup_tmp
30
+ @session_expiry_file = (@base_dir + "#{client_id}.expiry")
27
31
  log.info { "client_dir: #{@client_dir}, clean?: #{clean?}" }
28
32
  end
29
33
 
30
34
  def restart_clone
31
- self.class.new(base_dir, client_id:, expiry_interval:)
35
+ self.class.new(base_dir:, client_id:, expiry_interval:)
32
36
  end
33
37
 
34
38
  def clean?
@@ -36,7 +40,7 @@ module MQTT
36
40
  end
37
41
 
38
42
  def connected!
39
- client_dirs.each(&:mkpath)
43
+ pkt_dir.mkpath
40
44
 
41
45
  # record the previous session expiry duration so we can check it on a future restart
42
46
  session_expiry_file.open('w') { |f| f.write(expiry_interval.to_s) }
@@ -50,10 +54,10 @@ module MQTT
50
54
  return false unless session_expiry_file.exist?
51
55
 
52
56
  # choose the most recent of...
53
- # * the directory modification times (updated each time a packet file is added or removed),
57
+ # * the pkt directory modification time (updated each time a packet file is added or removed),
54
58
  # * the session_expiry_file modification time (updated on disconnect)
55
59
  # A hard crash without a clean disconnect will potentially expire a session earlier than the server
56
- Time.now - (client_dirs + [session_expiry_file]).map(&:mtime).max > session_expiry_file.read.to_i
60
+ Time.now - [pkt_dir, session_expiry_file].select(&:exist?).map(&:mtime).max > session_expiry_file.read.to_i
57
61
  end
58
62
 
59
63
  def store_packet(packet, replace: false)
@@ -71,125 +75,38 @@ module MQTT
71
75
  end
72
76
 
73
77
  def retry_packets(&)
74
- @client_dir.glob('pkt.*').sort_by(&:mtime).map { |f| f.open('r', &) }
78
+ pkt_dir.glob('*.mqtt').sort_by(&:mtime).map { |f| f.open('r', &) }
75
79
  end
76
80
 
77
81
  def packet_file(id)
78
82
  @client_dir + format('pkt/%04x.mqtt', id)
79
83
  end
80
84
 
81
- # QOS Receive
82
- # Unique ID is sortable (fixed width timestamp)
83
- # qos1 live: `/#{client_id}/qos1/#{unique_id}_#{packet_id}.live` write on PUBLISH, deleted on handled.
84
- # qos2 live: `/#{client_id}/qos2/#{unique_id}_#{packet_id}.live` (unhandled, unreleased)
85
- # qos2 handled: `/#{client_id}/qos2/#{unique_id}_#{packet_id}.handled` (handled, unreleased)
86
- # qos2 released: `/#{client_id}/qos2/#{unique_id}_#{packet_id}.released` (unhandled, released)
87
- # qos2 replay: '/#{client_id}/qos2/#{unique_id}_#{packet_id}.replay_[live|handled]
88
-
89
- # TODO: Recover utility
90
- # * cleanup_tmp
91
- # * qos2/*.live - rename to .replay_live or .handled
92
- # * qos2/*.released - rename to .replay_released or delete
93
-
94
- def store_qos_received(packet, unique_id)
95
- client_dir + qos_path(packet.qos, packet.id, unique_id).tap do |live_file|
96
- tmp_file = live_file.sub_ext('live', 'tmp')
97
- tmp_file.open('wb') { |f| packet.serialize(f) }
98
- tmp_file.rename(live_file)
99
- end
100
- end
101
-
102
- # Release the pending qos2 (return true if we had previously seen it)
103
- def qos2_release(id)
104
- qos2_live = find_qos2_file(id)
105
-
106
- if qos2_live&.extname == '.live'
107
- qos2_live.rename(qos2_live.sub_ext('.live', '.released'))
108
- else
109
- qos2_live&.delete
110
- end
111
-
112
- super
113
- rescue Errno::ENOENT
114
- retry
115
- end
116
-
117
- def qos_handled(packet, unique_id)
118
- if packet.qos == 1
119
- qos1_handled(packet, unique_id)
120
- elsif packet.qos == 2
121
- qos2_handled(packet, unique_id)
122
- end
123
- end
124
-
125
- # Called once at initialize.
126
- # rubocop:disable Metrics/AbcSize
85
+ # QoS2 inbound deduplication — recover pending packet ids from filenames
127
86
  def qos2_recover
128
- # Abort if there are unmarked files to potentially replay
129
- if (client_dir.glob('qos2/*.live') + client_dir.glob('qos2/*.released')).any?
130
- raise SessionNotRecoverable, "Unhandled QOS2 messages in #{"#{client_dir}/qos2"}. Run recover utility"
131
- end
132
-
133
- client_dir.glob('qos2/*.replay_live').each { |q2| q2.rename(q2.sub_ext('.live')) }
134
- client_dir.glob('qos2/*.replay_released').each { |q2| q2.rename(q2.sub_ext('.released')) }
135
-
136
- client_dir.glob(%w[qos2/*.live qos2/*.handled]).map { |f| f.basename.to_s.split('_').last.to_i(16) }
87
+ pkt_dir.glob('qos2_*.pending').map { |f| f.basename.to_s[/qos2_([0-9a-f]+)\.pending/, 1].to_i(16) }
137
88
  end
138
- # rubocop:enable Metrics/AbcSize
139
89
 
140
- # Load the unhandled packets with their unique id, only called once per session store
141
- def qos_unhandled_packets(&)
142
- client_dir.glob(%w[qos?/*.live qos2/*.released]).sort_by(&:basename)
143
- .to_h { |f| [f.open('r', &), f.basename.to_s.split('_').first] }
90
+ # Mark a QoS2 packet id as pending (received, awaiting PUBREL)
91
+ def qos2_pending(id)
92
+ FileUtils.touch(qos2_pending_file(id))
144
93
  end
145
94
 
146
- private
147
-
148
- def cleanup_tmp
149
- # Cleanup crashed .tmp files
150
- client_dir.glob('qos?/*.tmp').each(&:delete)
151
- end
152
-
153
- # Make directories.
154
- # pkt - packets we are sending, waiting to be acked
155
- # qos1 - qos1 packets received, waiting to be handled
156
- # qos2 - qos2 packets received, waiting to be released and handled
157
- def client_dirs
158
- %w[pkt qos1 qos2].map { |d| client_dir + d }
159
- end
160
-
161
- def qos2_handled(packet, unique_id)
162
- live_file = client_dir + qos_path(2, packet.id, unique_id)
163
- rel_file = client_dir + qos_path(2, packet.id, unique_id, 'released')
164
-
165
- live_file.rename(live_file.sub_ext('.handled')) if live_file.exist?
166
- rel_file.unlink if rel_file.exist?
95
+ # Release a QoS2 packet id (called before sending PUBCOMP)
96
+ def qos2_release(id)
97
+ qos2_pending_file(id).delete
167
98
  rescue Errno::ENOENT
168
- retry
99
+ # already released
169
100
  end
170
101
 
171
- def qos1_handled(packet, unique_id)
172
- live_file = (client_dir + qos_path(1, packet.id, unique_id))
173
- live_file.unlink
174
- rescue Errno::ENOENT
175
- log.warn { "qos_handled: #{live_file} unexpectedly not exists" }
176
- end
102
+ private
177
103
 
178
- # @return [String]
179
- def qos_path(qos, packet_id, unique_id, ext = 'live')
180
- format('qos%<qos>i/%<unique_id>s_%<packet_id>05x.%<ext>s', qos:, unique_id:, packet_id:, ext:)
104
+ def pkt_dir
105
+ @client_dir / 'pkt'
181
106
  end
182
107
 
183
- def find_qos2_file(id)
184
- # search live and handled separately to avoid race while renaming
185
- live_files = client_dir.glob(qos_path(2, id, '*', 'live'))
186
- raise ProtocolError, "QOS(#{id}): more than one packet: #{live_files}" if live_files.size > 1
187
- return live_files.first if live_files.size == 1
188
-
189
- handled_files = client_dir.glob(qos_path(2, id, '*', 'handled'))
190
- raise ProtocolError, "QOS(#{id}): more than one packet: #{handled_files}" if handled_files.size > 1
191
-
192
- handled_files.first
108
+ def qos2_pending_file(id)
109
+ @client_dir + format('pkt/qos2_%04x.pending', id)
193
110
  end
194
111
  end
195
112
  end
@@ -39,6 +39,10 @@ module MQTT
39
39
  @clean
40
40
  end
41
41
 
42
+ def disconnect_expiry_interval
43
+ 0 # Memory sessions don't survive disconnect anyway
44
+ end
45
+
42
46
  def store_packet(packet, replace: false)
43
47
  raise KeyError, 'packet id already exists' if !replace && stored_packet?(packet.id)
44
48
 
@@ -57,23 +61,11 @@ module MQTT
57
61
  @store.values
58
62
  end
59
63
 
60
- def qos2_recover
61
- [] # nothing to recover
62
- end
64
+ def qos2_recover = []
63
65
 
64
- def qos_unhandled_packets
65
- {} # nothing was persisted
66
- end
66
+ def qos2_pending(_id) = nil
67
67
 
68
- def store_qos_received(packet, unique_id)
69
- # For memory store, we don't need to persist received packets
70
- # This is just for tracking during the current session
71
- end
72
-
73
- def qos_handled(packet, unique_id)
74
- # For memory store, we don't need to persist handled status
75
- # This is just for tracking during the current session
76
- end
68
+ def qos2_release(_id) = nil
77
69
 
78
70
  def restart_clone
79
71
  self # don't actually clone.
@@ -0,0 +1,215 @@
1
+ # frozen_string_literal: true
2
+
3
+ require 'forwardable'
4
+
5
+ module MQTT
6
+ module Core
7
+ class Client
8
+ # Routes incoming PUBLISH packets to matching subscriptions
9
+ #
10
+ # The core implementation tracks subscriptions by topic_filter. A {Trie} is used for matching wildcard filters
11
+ # to topics.
12
+ class MessageRouter
13
+ # A trie (prefix tree) for efficiently matching MQTT topics against wildcard patterns.
14
+ #
15
+ # - '+' matches exactly one level (segment between '/')
16
+ # - '#' matches zero or more remaining levels (must be last)
17
+ class Trie
18
+ # @!visibility private
19
+ Node = Data.define(:children, :filter) do
20
+ extend Forwardable
21
+
22
+ # the filters set only ever has one entry, but it is possible that a level is created as an intermediate
23
+ # entry
24
+ def initialize(children: {}, filter: nil)
25
+ super
26
+ end
27
+
28
+ def redundant?
29
+ empty? && filter.nil?
30
+ end
31
+
32
+ def_delegators :children, :empty?, :[], :[]=, :delete, :include?
33
+ end
34
+
35
+ def initialize
36
+ @root = Node.new
37
+ end
38
+
39
+ # @!visibility private
40
+
41
+ # Add a topic filter to the trie
42
+ # @param filter [String] MQTT topic filter (may contain '+' or '#')
43
+ # @return [self]
44
+ def add(filter)
45
+ *parents, leaf = filter.split('/')
46
+ parent_node = parents.reduce(@root) do |current, level|
47
+ current[level] ||= Node.new
48
+ end
49
+
50
+ leaf_node = parent_node[leaf] ||= Node.new(filter: filter)
51
+ parent_node[leaf] = Node.new(children: leaf_node.children, filter: filter) unless leaf_node.filter
52
+ self
53
+ end
54
+
55
+ # Remove a topic filter from the trie
56
+ # @param filter [String] MQTT topic filter to remove
57
+ # @return [self]
58
+ def remove(filter)
59
+ levels = filter.split('/')
60
+ remove_recursive(@root, levels, filter, 0)
61
+ self
62
+ end
63
+
64
+ # Find all topic filters that match a given topic name
65
+ # @param topic [String] fully-qualified MQTT topic name (no wildcards)
66
+ # @return [Array<String>] matching topic filters
67
+ def match(topic)
68
+ [].tap { |filters| match_recursive(@root, topic.split('/'), 0, filters) }
69
+ end
70
+
71
+ # Check if the trie is empty
72
+ # @return [Boolean]
73
+ def empty?
74
+ @root.children.empty?
75
+ end
76
+
77
+ private
78
+
79
+ def remove_recursive(node, levels, filter, depth)
80
+ level = levels[depth]
81
+ child = node[level]
82
+
83
+ return unless child
84
+
85
+ if depth == levels.size - 1
86
+ if child.empty?
87
+ node.delete(level)
88
+ elsif child.filter
89
+ # Remove the filter, keep the children
90
+ node[level] = Node.new(children: child.children)
91
+ end
92
+ return
93
+ end
94
+
95
+ remove_recursive(child, levels, filter, depth + 1)
96
+ node.children.delete(level) if child.redundant?
97
+ end
98
+
99
+ def match_recursive(node, levels, depth, filters)
100
+ # If we've matched all levels, collect this filter if it has one
101
+ return filters << node.filter if depth == levels.size && node.filter
102
+
103
+ # Keep going
104
+ level = levels[depth]
105
+ match_recursive(node[level], levels, depth + 1, filters) if node.include?(level)
106
+
107
+ # Also single level '+'
108
+ match_recursive(node['+'], levels, depth + 1, filters) if node.include?('+')
109
+
110
+ filters << node['#'].filter if node.include?('#')
111
+ end
112
+ end
113
+
114
+ include Logger
115
+ include ConcurrentMonitor
116
+ extend Forwardable
117
+
118
+ def initialize(monitor:)
119
+ @monitor = monitor.new_monitor
120
+ @subs = Hash.new { |h, k| h[k] = Set.new }
121
+ @topic_trie = Trie.new
122
+ end
123
+
124
+ # @!visibility private
125
+
126
+ # Register subscriptions for routing before SUBSCRIBE is sent
127
+ def register(subscription:, subscribe:)
128
+ synchronize do
129
+ register_sync(subscription:, subscribe:)
130
+ (subscribe.subscribed_topic_filters - subscription.topic_filters.to_a).tap do |new_filters|
131
+ subscription.topic_filters.merge(new_filters) if new_filters.any?
132
+ end
133
+ end
134
+ end
135
+
136
+ # Deregister a subscription (or specific filters) from routing (before UNSUBSCRIBE is sent)
137
+ # Removes the subscription from @subs for the given filters (default: all registered filters).
138
+ # @return [Array<String>] filters that are now inactive (no remaining subscriptions) and safe to UNSUBSCRIBE
139
+ def deregister(*filters, subscription:)
140
+ synchronize do
141
+ filters = subscription.topic_filters.to_a if filters.empty?
142
+ subscription.topic_filters.subtract(filters)
143
+ deregister_filters(subscription, filters)
144
+ end
145
+ end
146
+
147
+ # Route packet to matching subscriptions
148
+ def route(packet)
149
+ synchronize { subs_for(matching_filters(packet)) }
150
+ end
151
+
152
+ # return all subscriptions, then clear them
153
+ def clear
154
+ synchronize { all_subscriptions.tap { reset } }
155
+ end
156
+
157
+ private
158
+
159
+ def reset
160
+ @subs.clear
161
+ @topic_trie = Trie.new
162
+ end
163
+
164
+ def subs_for(filters)
165
+ filters.flat_map { |f| @subs.fetch(f, []).to_a }.uniq
166
+ end
167
+
168
+ # TODO: we used to check for duplicate filters which we don't need, but also warn about duplicated retained
169
+ # messages which is still a thing for OVERLAPPING filters.
170
+ # We might still need to warn about that or handle that in Subscription put
171
+ # (to skip messages with retain flag after seeing one without, OR warn here if RAP or RH is set and
172
+ # there are overlapping filters
173
+
174
+ def register_sync(subscription:, subscribe:, use_trie: true)
175
+ filters = subscribe.subscribed_topic_filters
176
+
177
+ filters.each do |filter|
178
+ @subs[filter] << subscription
179
+ @topic_trie.add(filter) if use_trie && Subscription::Filters.wildcard_filter?(filter)
180
+ end
181
+ end
182
+
183
+ # Remove subscription from given filters, clean up empty filters
184
+ # @return [Array<String>] filters that are now inactive (no remaining subscriptions)
185
+ def deregister_filters(subscription, filters)
186
+ filters.each_with_object([]) do |filter, inactive|
187
+ next unless (subs_set = @subs[filter])
188
+
189
+ subs_set.delete(subscription)
190
+ next unless subs_set.empty?
191
+
192
+ remove_filter(filter)
193
+ inactive << filter
194
+ end
195
+ end
196
+
197
+ # called by: deregister when there are no remaining subscriptions for a filter
198
+ # called by: unsubscribe when successfully unsubscribed
199
+ def remove_filter(filter)
200
+ @subs.delete(filter)
201
+ @topic_trie.remove(filter) if Client::Subscription::Filters.wildcard_filter?(filter)
202
+ end
203
+
204
+ def matching_filters(pkt)
205
+ topic = pkt.topic_name
206
+ [topic, *@topic_trie.match(topic)]
207
+ end
208
+
209
+ def all_subscriptions
210
+ @subs.values.flat_map(&:to_a).uniq
211
+ end
212
+ end
213
+ end
214
+ end
215
+ end
@@ -21,6 +21,10 @@ module MQTT
21
21
  0
22
22
  end
23
23
 
24
+ def disconnect_expiry_interval
25
+ 0 # QoS0 sessions don't persist
26
+ end
27
+
24
28
  def connected!
25
29
  @store = Set.new
26
30
  end