dns-sd-browser 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +670 -0
- package/dist/browser.d.ts +145 -0
- package/dist/constants.d.ts +12 -0
- package/dist/dns.d.ts +155 -0
- package/dist/index.d.ts +113 -0
- package/dist/service.d.ts +115 -0
- package/dist/transport.d.ts +67 -0
- package/lib/browser.js +1661 -0
- package/lib/constants.js +17 -0
- package/lib/dns.js +685 -0
- package/lib/index.js +252 -0
- package/lib/service.js +152 -0
- package/lib/transport.js +345 -0
- package/package.json +44 -0
package/lib/browser.js
ADDED
|
@@ -0,0 +1,1661 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* ServiceBrowser — discovers DNS-SD services via mDNS and exposes them
|
|
3
|
+
* as an async iterable stream of events.
|
|
4
|
+
*
|
|
5
|
+
* Implements continuous querying per RFC 6762 §5.2:
|
|
6
|
+
* - Sends an initial query immediately
|
|
7
|
+
* - Repeats at increasing intervals (1s, 2s, 4s, …) up to 60 minutes
|
|
8
|
+
* - Includes known answers for suppression (RFC 6762 §7.1)
|
|
9
|
+
*
|
|
10
|
+
* @module
|
|
11
|
+
*/
|
|
12
|
+
|
|
13
|
+
import { RecordType } from './dns.js'
|
|
14
|
+
import { parseTxtData, extractInstanceName } from './service.js'
|
|
15
|
+
import { SERVICE_TYPE_ENUMERATION } from './constants.js'
|
|
16
|
+
|
|
17
|
+
/**
|
|
18
|
+
* @typedef {import('./service.js').Service} Service
|
|
19
|
+
* @typedef {import('./dns.js').DnsPacket} DnsPacket
|
|
20
|
+
* @typedef {import('./dns.js').DnsRecord} DnsRecord
|
|
21
|
+
* @typedef {import('./transport.js').MdnsTransport} MdnsTransport
|
|
22
|
+
*/
|
|
23
|
+
|
|
24
|
+
/**
|
|
25
|
+
* @typedef {{ type: 'serviceUp', service: Service }
|
|
26
|
+
* | { type: 'serviceDown', service: Service }
|
|
27
|
+
* | { type: 'serviceUpdated', service: Service }} BrowseEvent
|
|
28
|
+
*/
|
|
29
|
+
|
|
30
|
+
/**
|
|
31
|
+
* Resolves a promise externally — used for the async iterator's event queue.
|
|
32
|
+
* @template T
|
|
33
|
+
* @typedef {object} Deferred
|
|
34
|
+
* @property {Promise<T>} promise
|
|
35
|
+
* @property {(value: T) => void} resolve
|
|
36
|
+
* @property {(reason?: unknown) => void} reject
|
|
37
|
+
*/
|
|
38
|
+
|
|
39
|
+
/**
|
|
40
|
+
* @template T
|
|
41
|
+
* @returns {Deferred<T>}
|
|
42
|
+
*/
|
|
/**
 * Create a promise whose resolve/reject functions are exposed to the
 * caller. Used as a one-shot wake-up signal for the async iterator's
 * event queue.
 * @template T
 * @returns {Deferred<T>}
 */
function createDeferred() {
  /** @type {Deferred<T>} */
  // @ts-ignore — promise/resolve/reject are all assigned synchronously below
  const deferred = {}
  deferred.promise = new Promise((res, rej) => {
    // The executor runs synchronously, so both callbacks are in place
    // before createDeferred() returns.
    deferred.resolve = res
    deferred.reject = rej
  })
  return deferred
}
|
|

// Query interval schedule per RFC 6762 §5.2:
// Start at 1s and roughly double each time (note the schedule steps from
// 32s to 60s rather than 64s), capping at 1 hour (3600s) between queries.
const QUERY_INTERVALS_MS = [
  1_000, 2_000, 4_000, 8_000, 16_000, 32_000, 60_000, 120_000, 240_000,
  480_000, 960_000, 1_920_000, 3_600_000,
]

/**
 * Maximum number of services a single browser will track.
 * Prevents memory exhaustion from an attacker flooding the multicast group
 * with unique service names (analogous to CVE-2025-59529 in Avahi).
 */
const MAX_SERVICES = 1024

/**
 * Maximum number of events buffered before the oldest are dropped.
 * Prevents memory exhaustion when events are produced faster than consumed.
 */
const MAX_EVENT_BUFFER = 4096

/**
 * Minimum TTL check delay (ms). Even if a record claims TTL=0,
 * we won't schedule a check sooner than this to avoid busy-looping.
 */
const MIN_TTL_CHECK_DELAY_MS = 1_000

/**
 * TTL refresh thresholds per RFC 6762 §5.2.
 * Queries are sent at 80%, 85%, 90%, and 95% of TTL (with 2% jitter each).
 * If no answer is received by 100%, the record is removed.
 */
const TTL_REFRESH_THRESHOLDS = [0.80, 0.85, 0.90, 0.95]
|
|
88
|
+
|
|
89
|
+
export class ServiceBrowser {
|
|
  // ── Identity & configuration (assigned once in the constructor) ──

  /** @type {MdnsTransport} */
  #transport
  // Full query name, e.g. "_http._tcp.local"
  #queryName
  // Service type portion, e.g. "_http._tcp"
  #serviceType
  // Domain portion, e.g. "local"
  #domain
  // Protocol portion, e.g. "tcp"
  #protocol
  // Set by destroy(); checked by every timer callback and public method
  #destroyed = false
  /** When true, this is a service type enumeration browser (browseAll) */
  #isTypeEnumeration = false

  /**
   * Currently discovered services, keyed by FQDN.
   * @type {Map<string, Service>}
   */
  services = new Map()

  /** @type {BrowseEvent[]} - Buffered events waiting for a consumer */
  #eventBuffer = []

  /** @type {Deferred<void> | null} - Resolves when a new event is buffered */
  #eventSignal = null

  /** @type {ReturnType<typeof setTimeout> | null} */
  #queryTimer = null
  // Index into QUERY_INTERVALS_MS; also counts how many queries were sent
  #queryIndex = 0

  /** @type {AbortSignal | undefined} */
  #signal

  /** @type {(() => void) | null} */
  #abortHandler = null

  /** Whether an async iterator is currently active (single-consumer enforcement) */
  #iterating = false

  /**
   * Timestamp of the last query we sent, used to ignore our own loopback
   * queries in duplicate question suppression (RFC 6762 §7.3).
   * @type {number}
   */
  #lastQuerySentAt = 0

  /**
   * If set, the iterator throws this instead of returning done: true,
   * matching the Node.js convention (events.on, Readable, setInterval).
   * @type {any}
   */
  #destroyReason = undefined

  /**
   * Tracks PTR records we've received, mapping service FQDN → TTL.
   * Used for known-answer suppression and TTL-based expiration.
   * @type {Map<string, { ttl: number, receivedAt: number }>}
   */
  #knownPtrRecords = new Map()

  /**
   * FQDNs we've seen via PTR but couldn't resolve (no SRV yet).
   * When a standalone SRV arrives later, we re-attempt resolution.
   * @type {Set<string>}
   */
  #pendingFqdns = new Set()

  /**
   * Pending goodbye timers, keyed by FQDN.
   * Per RFC 6762 §10.1, goodbye records (TTL=0) schedule removal after 1 second
   * rather than removing immediately, giving the advertiser a window to correct
   * an accidental goodbye.
   * @type {Map<string, ReturnType<typeof setTimeout>>}
   */
  #pendingGoodbyes = new Map()

  /**
   * Per-address receive timestamps for cache-flush grace period (RFC 6762 §10.2).
   * When cache-flush is set, records received more than 1 second ago are flushed,
   * but records received within the last 1 second are kept to allow multi-packet
   * announcement bursts.
   * @type {Map<string, Map<string, number>>} - serviceFqdn → (address → receivedAt)
   */
  #addressTimestamps = new Map()

  /**
   * One-shot timer for the next TTL expiry/refresh check. Armed with
   * setTimeout (destroy() and resetNetwork() clear it with clearTimeout),
   * so the type here is setTimeout's return type.
   * @type {ReturnType<typeof setTimeout> | null}
   */
  #ttlCheckTimer = null

  /** @type {(() => void) | null} */
  #onDestroy = null

  /**
   * Pending reconfirmation state, keyed by FQDN (RFC 6762 §10.4).
   * Each entry holds the timers for the verification queries and
   * the final flush timeout.
   * @type {Map<string, { timers: ReturnType<typeof setTimeout>[], startedAt: number }>}
   */
  #pendingReconfirms = new Map()

  /** Timeout (ms) for reconfirmation before flushing. Default 10s per RFC 6762 §10.4. */
  #reconfirmTimeoutMs = 10_000

  /**
   * POOF state: tracks queries from other hosts that mention our cached records.
   * Per RFC 6762 §10.5, after 2+ unanswered queries within the timeout window,
   * the record should be flushed from the cache.
   * @type {Map<string, { queryCount: number, firstSeenAt: number, pendingQueries: number }>}
   */
  #poofTracking = new Map()

  /** @type {Map<string, ReturnType<typeof setTimeout>>} */
  #poofTimers = new Map()

  /** Timeout (ms) for POOF window per RFC 6762 §10.5. */
  #poofTimeoutMs = 10_000

  /** Delay (ms) to wait for a response before counting query as unanswered. */
  #poofResponseWaitMs = 2_000
|
|
204
|
+
|
|
  /**
   * @param {MdnsTransport} transport
   * @param {object} options
   * @param {string} options.queryName - e.g. "_http._tcp.local"
   * @param {string} options.serviceType - e.g. "_http._tcp"
   * @param {string} options.domain - e.g. "local"
   * @param {string} options.protocol - e.g. "tcp"
   * @param {boolean} [options.isTypeEnumeration] - true for browseAll
   * @param {AbortSignal} [options.signal]
   * @param {() => void} [options.onDestroy] - Called when the browser is destroyed
   * @param {number} [options.reconfirmTimeoutMs] - Reconfirmation timeout (ms), default 10000
   * @param {number} [options.poofTimeoutMs=10000] - POOF window (RFC 6762 §10.5)
   * @param {number} [options.poofResponseWaitMs=2000] - Time to wait for response before counting as unanswered
   */
  constructor(transport, { queryName, serviceType, domain, protocol, isTypeEnumeration, signal, onDestroy, reconfirmTimeoutMs, poofTimeoutMs, poofResponseWaitMs }) {
    this.#transport = transport
    this.#queryName = queryName
    this.#serviceType = serviceType
    this.#domain = domain
    this.#protocol = protocol
    this.#isTypeEnumeration = isTypeEnumeration ?? false
    this.#signal = signal
    this.#onDestroy = onDestroy ?? null
    if (reconfirmTimeoutMs !== undefined) {
      this.#reconfirmTimeoutMs = reconfirmTimeoutMs
    }
    this.#poofTimeoutMs = poofTimeoutMs ?? 10_000
    this.#poofResponseWaitMs = poofResponseWaitMs ?? 2_000

    // Start listening and querying. Two query handlers are registered on
    // purpose: #handleIncomingQuery performs duplicate question suppression
    // (RFC 6762 §7.3) and #handleQuery performs POOF tracking (RFC 6762 §10.5).
    this.#transport.addHandler(this.#handlePacket)
    this.#transport.addQueryHandler(this.#handleIncomingQuery)
    this.#transport.addQueryHandler(this.#handleQuery)
    this.#scheduleInitialQuery()

    // Handle AbortSignal — the abort reason is forwarded to destroy() so the
    // iterator throws it (matching Node.js convention: events.on, Readable, setInterval).
    if (signal) {
      if (signal.aborted) {
        // Pre-aborted signal: tear down synchronously during construction.
        this.destroy(signal.reason)
      } else {
        this.#abortHandler = () => this.destroy(signal.reason)
        signal.addEventListener('abort', this.#abortHandler, { once: true })
      }
    }
  }
|
|
251
|
+
|
|
252
|
+
/**
|
|
253
|
+
* Manually remove a service by FQDN, emitting a `serviceDown` event.
|
|
254
|
+
*
|
|
255
|
+
* Use this when your application detects that a service is unreachable
|
|
256
|
+
* (e.g. via a health check) before its TTL expires. The service is removed
|
|
257
|
+
* from the `services` Map and its known-answer record is cleared, so it
|
|
258
|
+
* will be re-discovered if the advertiser announces it again.
|
|
259
|
+
*
|
|
260
|
+
* @param {string} fqdn - Fully qualified service name (e.g. "My Service._http._tcp.local")
|
|
261
|
+
* @returns {boolean} true if the service was found and removed, false otherwise
|
|
262
|
+
*/
|
|
263
|
+
removeService(fqdn) {
|
|
264
|
+
if (this.#destroyed) throw new Error('Browser has been destroyed')
|
|
265
|
+
if (!this.services.has(fqdn)) return false
|
|
266
|
+
this.#removeService(fqdn)
|
|
267
|
+
return true
|
|
268
|
+
}
|
|
269
|
+
|
|
270
|
+
/**
|
|
271
|
+
* Request reconfirmation of a service record (RFC 6762 §10.4).
|
|
272
|
+
* Sends verification queries and removes the service if no response
|
|
273
|
+
* is received within the timeout.
|
|
274
|
+
* @param {string} fqdn - The service FQDN to reconfirm
|
|
275
|
+
*/
|
|
276
|
+
reconfirm(fqdn) {
|
|
277
|
+
if (this.#destroyed) throw new Error('Browser has been destroyed')
|
|
278
|
+
if (!this.services.has(fqdn)) return
|
|
279
|
+
// Already reconfirming this FQDN — don't double up
|
|
280
|
+
if (this.#pendingReconfirms.has(fqdn)) return
|
|
281
|
+
|
|
282
|
+
const timers = []
|
|
283
|
+
|
|
284
|
+
// Send first PTR query immediately
|
|
285
|
+
this.#sendReconfirmQuery()
|
|
286
|
+
|
|
287
|
+
// Send second PTR query after ~2 seconds
|
|
288
|
+
timers.push(setTimeout(() => {
|
|
289
|
+
this.#sendReconfirmQuery()
|
|
290
|
+
}, 2_000))
|
|
291
|
+
|
|
292
|
+
// Set flush timeout — if no fresh response within the window, remove
|
|
293
|
+
timers.push(setTimeout(() => {
|
|
294
|
+
this.#pendingReconfirms.delete(fqdn)
|
|
295
|
+
if (this.services.has(fqdn)) {
|
|
296
|
+
this.#removeService(fqdn)
|
|
297
|
+
}
|
|
298
|
+
}, this.#reconfirmTimeoutMs))
|
|
299
|
+
|
|
300
|
+
this.#pendingReconfirms.set(fqdn, { timers, startedAt: Date.now() })
|
|
301
|
+
}
|
|
302
|
+
|
|
303
|
+
/**
|
|
304
|
+
* Cancel a pending reconfirmation for a service (called when a fresh
|
|
305
|
+
* response is received, proving the service is still alive).
|
|
306
|
+
* @param {string} fqdn
|
|
307
|
+
*/
|
|
308
|
+
#cancelReconfirm(fqdn) {
|
|
309
|
+
const pending = this.#pendingReconfirms.get(fqdn)
|
|
310
|
+
if (!pending) return
|
|
311
|
+
for (const timer of pending.timers) {
|
|
312
|
+
clearTimeout(timer)
|
|
313
|
+
}
|
|
314
|
+
this.#pendingReconfirms.delete(fqdn)
|
|
315
|
+
}
|
|
316
|
+
|
|
  /**
   * Flush all discovered services and restart querying from scratch.
   *
   * Call this after a network interface change (e.g. WiFi reconnect).
   * All current services are emitted as `serviceDown` events, caches are
   * cleared, and querying restarts with the initial rapid schedule.
   */
  resetNetwork() {
    if (this.#destroyed) return

    // Emit serviceDown for all known services before dropping them,
    // so consumers observe the flush.
    for (const service of this.services.values()) {
      this.#emit({ type: 'serviceDown', service })
    }
    this.services.clear()
    this.#knownPtrRecords.clear()
    this.#pendingFqdns.clear()
    this.#addressTimestamps.clear()

    // Cancel pending goodbyes — those services are already flushed
    /* c8 ignore next 3 -- timer cleanup requires precise timing to have pending goodbyes during resetNetwork */
    for (const timer of this.#pendingGoodbyes.values()) {
      clearTimeout(timer)
    }
    this.#pendingGoodbyes.clear()

    // Cancel pending reconfirmations — those services are already flushed
    /* c8 ignore next 5 -- timer cleanup requires precise timing to have pending reconfirms during resetNetwork */
    for (const pending of this.#pendingReconfirms.values()) {
      for (const timer of pending.timers) {
        clearTimeout(timer)
      }
    }
    this.#pendingReconfirms.clear()

    // Clear POOF tracking
    /* c8 ignore next 3 -- timer cleanup requires precise timing to have POOF timers during resetNetwork */
    for (const timer of this.#poofTimers.values()) {
      clearTimeout(timer)
    }
    this.#poofTimers.clear()
    this.#poofTracking.clear()

    // Cancel existing timers (query backoff and TTL expiry check)
    if (this.#queryTimer) {
      clearTimeout(this.#queryTimer)
      this.#queryTimer = null
    }
    if (this.#ttlCheckTimer) {
      clearTimeout(this.#ttlCheckTimer)
      this.#ttlCheckTimer = null
    }

    // Restart queries from the beginning (fast initial schedule)
    this.#queryIndex = 0
    this.#scheduleInitialQuery()
  }
|
|
374
|
+
|
|
  /**
   * Stop browsing and end the async iterator. Idempotent: repeat calls
   * are no-ops.
   * @param {any} [reason] - If provided, the iterator throws this instead of
   *   returning done: true (matching Node.js convention for AbortSignal).
   */
  destroy(reason) {
    if (this.#destroyed) return
    this.#destroyed = true
    if (reason !== undefined) this.#destroyReason = reason

    if (this.#queryTimer) {
      clearTimeout(this.#queryTimer)
      this.#queryTimer = null
    }

    if (this.#ttlCheckTimer) {
      clearTimeout(this.#ttlCheckTimer)
      this.#ttlCheckTimer = null
    }

    /* c8 ignore next 3 -- timer cleanup requires pending goodbyes at destroy time */
    for (const timer of this.#pendingGoodbyes.values()) {
      clearTimeout(timer)
    }
    this.#pendingGoodbyes.clear()

    /* c8 ignore next 5 -- timer cleanup requires pending reconfirms at destroy time */
    for (const pending of this.#pendingReconfirms.values()) {
      for (const timer of pending.timers) {
        clearTimeout(timer)
      }
    }
    this.#pendingReconfirms.clear()

    // Unregister all three transport callbacks registered in the constructor.
    this.#transport.removeHandler(this.#handlePacket)
    this.#transport.removeQueryHandler(this.#handleIncomingQuery)
    this.#transport.removeQueryHandler(this.#handleQuery)

    // Clean up POOF tracking
    for (const timer of this.#poofTimers.values()) {
      clearTimeout(timer)
    }
    this.#poofTimers.clear()
    this.#poofTracking.clear()

    if (this.#abortHandler && this.#signal) {
      this.#signal.removeEventListener('abort', this.#abortHandler)
      this.#abortHandler = null
    }

    // Notify parent (DnsSdBrowser) so it can remove us from its set.
    // Deferred via queueMicrotask so the callback always fires after the
    // constructor returns — avoids a TDZ reference error when destroy() is
    // called synchronously during construction (e.g. pre-aborted signal).
    if (this.#onDestroy) {
      const cb = this.#onDestroy
      this.#onDestroy = null
      queueMicrotask(cb)
    }

    // Wake the iterator so it can observe #destroyed and throw.
    if (this.#eventSignal) {
      this.#eventSignal.resolve()
    }
  }
|
|
440
|
+
|
|
  /**
   * Explicit resource management hook (`await using`): disposing the
   * browser simply calls destroy() with no reason, so the iterator ends
   * with done: true rather than throwing.
   * @returns {Promise<void>}
   */
  async [Symbol.asyncDispose]() {
    this.destroy()
  }
|
|
445
|
+
|
|
  /**
   * Obtain the single async iterator over browse events. Only one
   * iterator may be active at a time; returning (e.g. `break` from a
   * for-await loop) destroys the browser.
   * @returns {AsyncIterableIterator<BrowseEvent>}
   */
  [Symbol.asyncIterator]() {
    if (this.#destroyed) throw new Error('Browser has been destroyed')
    if (this.#iterating) {
      throw new Error('ServiceBrowser only supports a single concurrent async iterator')
    }
    this.#iterating = true

    /** @type {AsyncIterableIterator<BrowseEvent>} */
    const iter = {
      next: async () => {
        while (true) {
          // Return buffered events first
          if (this.#eventBuffer.length > 0) {
            const value = /** @type {BrowseEvent} */ (this.#eventBuffer.shift())
            return { value, done: false }
          }

          // If destroyed and no more buffered events, end iteration.
          // A destroy reason (e.g. an AbortSignal's reason) is thrown
          // instead of signalling normal completion.
          if (this.#destroyed) {
            this.#iterating = false
            if (this.#destroyReason !== undefined) {
              throw this.#destroyReason
            }
            return { value: undefined, done: true }
          }

          // Wait for the next event.
          // If destroyed while waiting, the deferred is resolved
          // and the loop re-checks #destroyed on the next iteration.
          this.#eventSignal = createDeferred()
          await this.#eventSignal.promise
          this.#eventSignal = null
        }
      },

      // Called on `break`/`return` from a for-await loop: releases the
      // single-consumer slot and tears the browser down.
      return: async () => {
        this.#iterating = false
        this.destroy()
        return { value: undefined, done: true }
      },

      // The iterator is itself iterable, per the async iteration protocol.
      [Symbol.asyncIterator]() { return iter },
    }
    return iter
  }
|
|
492
|
+
|
|
493
|
+
// ─── Private methods ───────────────────────────────────────────────
|
|
494
|
+
|
|
  /**
   * Handle an incoming mDNS query packet (QR=0) for POOF.
   * Per RFC 6762 §10.5: if we see queries for which our cached records
   * would be expected to answer, but no answer is seen, the records
   * should be flushed after 2+ unanswered queries within the timeout window.
   * Arrow function to preserve `this` when used as a callback.
   * @type {(packet: DnsPacket) => void}
   */
  #handleQuery = (packet) => {
    if (this.#destroyed) return

    // Ignore our own queries that loop back via multicast (same guard
    // as #handleIncomingQuery uses for duplicate question suppression).
    if (Date.now() - this.#lastQuerySentAt < 100) return

    // Only interested in QM (multicast) queries for our service type.
    // QU queries may receive unicast responses we can't observe, so
    // counting them as unanswered would cause false POOF flushes.
    const questions = packet.questions || []
    const isRelevant = questions.some(
      (q) =>
        !q.qu &&
        q.name.toLowerCase() === this.#queryName.toLowerCase() &&
        q.type === RecordType.PTR
    )
    if (!isRelevant) return

    // For each cached service FQDN, track the query and start/extend a
    // response-wait timer. Multiple queries within the wait window are
    // all counted when the timer fires.
    const now = Date.now()
    for (const fqdn of this.#knownPtrRecords.keys()) {
      // Initialize or refresh tracking for this FQDN; a stale entry
      // (outside the POOF window) is replaced with a fresh one.
      let tracking = this.#poofTracking.get(fqdn)
      if (!tracking || (now - tracking.firstSeenAt) > this.#poofTimeoutMs) {
        tracking = { queryCount: 0, firstSeenAt: now, pendingQueries: 0 }
        this.#poofTracking.set(fqdn, tracking)
      }
      tracking.pendingQueries = (tracking.pendingQueries || 0) + 1

      // If a response-wait timer is already running, it will handle
      // the accumulated pending queries when it fires
      if (this.#poofTimers.has(fqdn)) continue

      // Capture query observation time for window checks (not the timer
      // fire time, which includes the response-wait delay and could cause
      // the window to appear expired when it isn't).
      const queryObservedAt = now
      const timer = setTimeout(() => {
        this.#poofTimers.delete(fqdn)
        const current = this.#poofTracking.get(fqdn)
        if (!current || !current.pendingQueries) return

        // If tracking window expired, start fresh with pending queries
        /* c8 ignore next 4 -- requires POOF window expiry timing */
        if (queryObservedAt - current.firstSeenAt > this.#poofTimeoutMs) {
          current.queryCount = current.pendingQueries
          current.firstSeenAt = queryObservedAt
        } else {
          current.queryCount += current.pendingQueries
        }
        current.pendingQueries = 0

        // After 2+ unanswered queries within the window, flush the record
        if (current.queryCount >= 2 && (queryObservedAt - current.firstSeenAt) <= this.#poofTimeoutMs) {
          this.#removeService(fqdn)
        }
      }, this.#poofResponseWaitMs)

      this.#poofTimers.set(fqdn, timer)
    }
  }
|
|
567
|
+
|
|
568
|
+
/**
|
|
569
|
+
* Clear POOF tracking for a service FQDN when a valid response is observed.
|
|
570
|
+
* @param {string} serviceFqdn
|
|
571
|
+
*/
|
|
572
|
+
#clearPoofTracking(serviceFqdn) {
|
|
573
|
+
this.#poofTracking.delete(serviceFqdn)
|
|
574
|
+
const timer = this.#poofTimers.get(serviceFqdn)
|
|
575
|
+
if (timer) {
|
|
576
|
+
clearTimeout(timer)
|
|
577
|
+
this.#poofTimers.delete(serviceFqdn)
|
|
578
|
+
}
|
|
579
|
+
}
|
|
580
|
+
|
|
581
|
+
/** Send the initial query after a short random delay (RFC 6762 §5.2). */
|
|
582
|
+
#scheduleInitialQuery() {
|
|
583
|
+
// Random delay 20-120ms to avoid thundering herd (RFC 6762 §5.2)
|
|
584
|
+
const jitter = 20 + Math.random() * 100
|
|
585
|
+
this.#queryTimer = setTimeout(() => {
|
|
586
|
+
this.#sendQuery()
|
|
587
|
+
this.#queryIndex++
|
|
588
|
+
this.#scheduleNextQuery()
|
|
589
|
+
}, jitter)
|
|
590
|
+
}
|
|
591
|
+
|
|
592
|
+
/** Schedule the next query with exponential backoff. */
|
|
593
|
+
#scheduleNextQuery() {
|
|
594
|
+
if (this.#destroyed) return
|
|
595
|
+
|
|
596
|
+
const intervalIndex = Math.min(this.#queryIndex, QUERY_INTERVALS_MS.length - 1)
|
|
597
|
+
// Add 2% random jitter per RFC 6762 §5.2
|
|
598
|
+
const baseInterval = QUERY_INTERVALS_MS[intervalIndex]
|
|
599
|
+
const jitter = baseInterval * 0.02 * Math.random()
|
|
600
|
+
const delay = baseInterval + jitter
|
|
601
|
+
|
|
602
|
+
this.#queryTimer = setTimeout(() => {
|
|
603
|
+
this.#sendQuery()
|
|
604
|
+
this.#queryIndex++
|
|
605
|
+
this.#scheduleNextQuery()
|
|
606
|
+
}, delay)
|
|
607
|
+
}
|
|
608
|
+
|
|
609
|
+
/**
|
|
610
|
+
* Cancel the current query timer and reschedule the next query.
|
|
611
|
+
* Used by duplicate question suppression (RFC 6762 §7.3) to treat
|
|
612
|
+
* another host's matching query as if we had sent our own.
|
|
613
|
+
*/
|
|
614
|
+
#rescheduleQuery() {
|
|
615
|
+
if (this.#destroyed) return
|
|
616
|
+
if (this.#queryTimer) {
|
|
617
|
+
clearTimeout(this.#queryTimer)
|
|
618
|
+
this.#queryTimer = null
|
|
619
|
+
}
|
|
620
|
+
// Advance the query schedule as if we had just sent a query ourselves
|
|
621
|
+
this.#queryIndex++
|
|
622
|
+
this.#scheduleNextQuery()
|
|
623
|
+
}
|
|
624
|
+
|
|
625
|
+
/**
|
|
626
|
+
* Handle an incoming mDNS query packet for duplicate question suppression.
|
|
627
|
+
* Per RFC 6762 §7.3, if another host sends a QM query matching ours and
|
|
628
|
+
* their Known-Answer section covers all the records we would include,
|
|
629
|
+
* we suppress our next scheduled query.
|
|
630
|
+
* Arrow function to preserve `this` when used as a callback.
|
|
631
|
+
* @type {(packet: DnsPacket) => void}
|
|
632
|
+
*/
|
|
633
|
+
#handleIncomingQuery = (packet) => {
|
|
634
|
+
if (this.#destroyed) return
|
|
635
|
+
|
|
636
|
+
// Only suppress during QM phase (after initial QU query has been sent)
|
|
637
|
+
if (this.#queryIndex <= 0) return
|
|
638
|
+
|
|
639
|
+
// Ignore our own queries that loop back via multicast.
|
|
640
|
+
// If we sent a query very recently (<100ms), this is likely our own loopback.
|
|
641
|
+
if (Date.now() - this.#lastQuerySentAt < 100) return
|
|
642
|
+
|
|
643
|
+
// Check if any QM (multicast) question matches our query.
|
|
644
|
+
// RFC 6762 §7.3 applies only to QM questions, not QU questions.
|
|
645
|
+
const hasMatchingQuestion = packet.questions.some(
|
|
646
|
+
(q) =>
|
|
647
|
+
q.type === RecordType.PTR &&
|
|
648
|
+
q.name.toLowerCase() === this.#queryName.toLowerCase() &&
|
|
649
|
+
!q.qu
|
|
650
|
+
)
|
|
651
|
+
if (!hasMatchingQuestion) return
|
|
652
|
+
|
|
653
|
+
// Check if their known answers cover ours. Use the same TTL>50% filter
|
|
654
|
+
// as #sendQuery() so suppression decisions match what we'd actually send
|
|
655
|
+
// (RFC 6762 §7.1).
|
|
656
|
+
const theirAnswers = new Set(
|
|
657
|
+
(packet.answers || [])
|
|
658
|
+
.filter(
|
|
659
|
+
(a) =>
|
|
660
|
+
a.type === RecordType.PTR &&
|
|
661
|
+
typeof a.name === 'string' &&
|
|
662
|
+
a.name.toLowerCase() === this.#queryName.toLowerCase() &&
|
|
663
|
+
typeof a.data === 'string'
|
|
664
|
+
)
|
|
665
|
+
.map((a) => /** @type {string} */ (a.data).toLowerCase())
|
|
666
|
+
)
|
|
667
|
+
|
|
668
|
+
const now = Date.now()
|
|
669
|
+
const ourKnownAnswers = []
|
|
670
|
+
for (const [fqdn, info] of this.#knownPtrRecords) {
|
|
671
|
+
const elapsed = (now - info.receivedAt) / 1000
|
|
672
|
+
const remaining = info.ttl - elapsed
|
|
673
|
+
if (remaining > info.ttl / 2) {
|
|
674
|
+
ourKnownAnswers.push(fqdn.toLowerCase())
|
|
675
|
+
}
|
|
676
|
+
}
|
|
677
|
+
|
|
678
|
+
const allCovered = ourKnownAnswers.every((fqdn) => theirAnswers.has(fqdn))
|
|
679
|
+
|
|
680
|
+
if (allCovered) {
|
|
681
|
+
// Suppress our next query — treat as if we just sent it
|
|
682
|
+
this.#rescheduleQuery()
|
|
683
|
+
}
|
|
684
|
+
}
|
|
685
|
+
|
|
686
|
+
/** Send an mDNS PTR query, including known answers for suppression. */
|
|
687
|
+
async #sendQuery() {
|
|
688
|
+
if (this.#destroyed) return
|
|
689
|
+
|
|
690
|
+
/** @type {import('./dns.js').DnsRecord[]} */
|
|
691
|
+
const knownAnswers = []
|
|
692
|
+
|
|
693
|
+
// Include known PTR records whose TTL is still >50% remaining
|
|
694
|
+
const now = Date.now()
|
|
695
|
+
for (const [fqdn, info] of this.#knownPtrRecords) {
|
|
696
|
+
const elapsed = (now - info.receivedAt) / 1000
|
|
697
|
+
const remaining = info.ttl - elapsed
|
|
698
|
+
// Only include if remaining TTL > 50% of original (RFC 6762 §7.1)
|
|
699
|
+
if (remaining > info.ttl / 2) {
|
|
700
|
+
knownAnswers.push({
|
|
701
|
+
name: this.#queryName,
|
|
702
|
+
type: RecordType.PTR,
|
|
703
|
+
class: 1,
|
|
704
|
+
cacheFlush: false,
|
|
705
|
+
ttl: Math.round(remaining),
|
|
706
|
+
data: fqdn,
|
|
707
|
+
})
|
|
708
|
+
}
|
|
709
|
+
}
|
|
710
|
+
|
|
711
|
+
// Set QU (unicast-response) bit on the first query (RFC 6762 §5.4).
|
|
712
|
+
// This allows responders to reply via unicast, reducing multicast traffic.
|
|
713
|
+
const qu = this.#queryIndex === 0
|
|
714
|
+
|
|
715
|
+
try {
|
|
716
|
+
// Set timestamp before sending so the loopback guard in
|
|
717
|
+
// #handleIncomingQuery filters out our own multicast query even if
|
|
718
|
+
// it loops back before the await resolves.
|
|
719
|
+
this.#lastQuerySentAt = Date.now()
|
|
720
|
+
await this.#transport.sendQuery({
|
|
721
|
+
questions: [{ name: this.#queryName, type: RecordType.PTR, qu }],
|
|
722
|
+
answers: knownAnswers,
|
|
723
|
+
})
|
|
724
|
+
/* c8 ignore start -- non-fatal catch requires network send failure */
|
|
725
|
+
} catch {
|
|
726
|
+
// Query send failures are non-fatal — we'll retry on the next interval
|
|
727
|
+
}
|
|
728
|
+
/* c8 ignore stop */
|
|
729
|
+
}
|
|
730
|
+
|
|
731
|
+
/**
|
|
732
|
+
* Send a PTR query for reconfirmation (RFC 6762 §10.4).
|
|
733
|
+
* Does not include known answers — we want the responder to re-announce.
|
|
734
|
+
*/
|
|
735
|
+
async #sendReconfirmQuery() {
|
|
736
|
+
if (this.#destroyed) return
|
|
737
|
+
try {
|
|
738
|
+
this.#lastQuerySentAt = Date.now()
|
|
739
|
+
await this.#transport.sendQuery({
|
|
740
|
+
questions: [{ name: this.#queryName, type: RecordType.PTR, qu: false }],
|
|
741
|
+
})
|
|
742
|
+
/* c8 ignore start -- non-fatal catch requires network send failure */
|
|
743
|
+
} catch {
|
|
744
|
+
// Non-fatal — we'll retry
|
|
745
|
+
}
|
|
746
|
+
/* c8 ignore stop */
|
|
747
|
+
}
|
|
748
|
+
|
|
749
|
+
/**
|
|
750
|
+
* Re-send the query with the QU bit set after receiving a truncated response.
|
|
751
|
+
* This allows the responder to reply via unicast with the full record set
|
|
752
|
+
* (RFC 6762 §18.5).
|
|
753
|
+
*/
|
|
754
|
+
async #sendTruncatedRetry() {
|
|
755
|
+
try {
|
|
756
|
+
await this.#transport.sendQuery({
|
|
757
|
+
questions: [{ name: this.#queryName, type: RecordType.PTR, qu: true }],
|
|
758
|
+
})
|
|
759
|
+
/* c8 ignore start -- non-fatal catch requires network send failure */
|
|
760
|
+
} catch {
|
|
761
|
+
// Non-fatal — the normal query schedule will retry
|
|
762
|
+
}
|
|
763
|
+
/* c8 ignore stop */
|
|
764
|
+
}
|
|
765
|
+
|
|
766
|
+
/**
|
|
767
|
+
* Schedule a TTL expiration check at the time the soonest record expires.
|
|
768
|
+
* Called whenever the set of known PTR records changes (new record added
|
|
769
|
+
* or record removed). This is more efficient and responsive than a fixed
|
|
770
|
+
* interval — it only wakes when a record actually needs to expire, and
|
|
771
|
+
* services are removed within ~1 second of their TTL expiring.
|
|
772
|
+
*/
|
|
773
|
+
#scheduleTtlCheck() {
|
|
774
|
+
if (this.#destroyed) return
|
|
775
|
+
|
|
776
|
+
// Cancel any existing scheduled check
|
|
777
|
+
if (this.#ttlCheckTimer) {
|
|
778
|
+
clearTimeout(this.#ttlCheckTimer)
|
|
779
|
+
this.#ttlCheckTimer = null
|
|
780
|
+
}
|
|
781
|
+
|
|
782
|
+
if (this.#knownPtrRecords.size === 0) return
|
|
783
|
+
|
|
784
|
+
// Find the soonest event: a refresh query at 80/85/90/95% TTL or expiry at 100%.
|
|
785
|
+
// Per RFC 6762 §5.2, queries are sent at 80%, 85%, 90%, 95% of TTL with 2% jitter.
|
|
786
|
+
const now = Date.now()
|
|
787
|
+
let soonestDelay = Infinity
|
|
788
|
+
|
|
789
|
+
for (const info of this.#knownPtrRecords.values()) {
|
|
790
|
+
const ttlMs = info.ttl * 1000
|
|
791
|
+
const expiresAt = info.receivedAt + ttlMs
|
|
792
|
+
const expiryDelay = expiresAt - now
|
|
793
|
+
|
|
794
|
+
// Find the next refresh threshold this record hasn't passed yet
|
|
795
|
+
let nextEvent = expiryDelay
|
|
796
|
+
for (const threshold of TTL_REFRESH_THRESHOLDS) {
|
|
797
|
+
const refreshAt = info.receivedAt + ttlMs * threshold
|
|
798
|
+
const refreshDelay = refreshAt - now
|
|
799
|
+
if (refreshDelay > 0) {
|
|
800
|
+
nextEvent = refreshDelay
|
|
801
|
+
break
|
|
802
|
+
}
|
|
803
|
+
}
|
|
804
|
+
|
|
805
|
+
if (nextEvent < soonestDelay) {
|
|
806
|
+
soonestDelay = nextEvent
|
|
807
|
+
}
|
|
808
|
+
}
|
|
809
|
+
|
|
810
|
+
// Clamp to at least MIN_TTL_CHECK_DELAY_MS to avoid busy-looping
|
|
811
|
+
const delay = Math.max(soonestDelay, MIN_TTL_CHECK_DELAY_MS)
|
|
812
|
+
|
|
813
|
+
this.#ttlCheckTimer = setTimeout(() => {
|
|
814
|
+
this.#ttlCheckTimer = null
|
|
815
|
+
this.#processRecordLifecycle()
|
|
816
|
+
}, delay)
|
|
817
|
+
}
|
|
818
|
+
|
|
819
|
+
/**
|
|
820
|
+
* Process record lifecycle: send refresh queries for records approaching
|
|
821
|
+
* expiry (80/85/90/95% of TTL per RFC 6762 §5.2), and remove expired records.
|
|
822
|
+
* Then reschedule for the next event.
|
|
823
|
+
*/
|
|
824
|
+
#processRecordLifecycle() {
|
|
825
|
+
if (this.#destroyed) return
|
|
826
|
+
|
|
827
|
+
const now = Date.now()
|
|
828
|
+
let needsRefresh = false
|
|
829
|
+
|
|
830
|
+
for (const [fqdn, info] of this.#knownPtrRecords) {
|
|
831
|
+
const elapsed = (now - info.receivedAt) / 1000
|
|
832
|
+
const fraction = elapsed / info.ttl
|
|
833
|
+
|
|
834
|
+
if (fraction >= 1) {
|
|
835
|
+
// TTL fully expired — remove the service
|
|
836
|
+
this.#removeService(fqdn)
|
|
837
|
+
} else if (fraction >= TTL_REFRESH_THRESHOLDS[0]) {
|
|
838
|
+
// Between 80-100% of TTL — need a refresh query (RFC 6762 §5.2)
|
|
839
|
+
needsRefresh = true
|
|
840
|
+
}
|
|
841
|
+
}
|
|
842
|
+
|
|
843
|
+
// Send a single refresh query for all records approaching expiry.
|
|
844
|
+
// The normal PTR query will prompt the advertiser to re-announce.
|
|
845
|
+
if (needsRefresh) {
|
|
846
|
+
this.#sendQuery()
|
|
847
|
+
}
|
|
848
|
+
|
|
849
|
+
// Reschedule for the next soonest event (if any records remain)
|
|
850
|
+
this.#scheduleTtlCheck()
|
|
851
|
+
}
|
|
852
|
+
|
|
853
|
+
  /**
   * Handle an incoming mDNS response packet.
   * Arrow function to preserve `this` when used as a callback.
   * @type {(packet: DnsPacket) => void}
   */
  #handlePacket = (packet) => {
    if (this.#destroyed) return

    // If the response is truncated (TC bit), re-query with QU bit to get
    // the full response via unicast (RFC 6762 §18.5). Fire-and-forget:
    // #sendTruncatedRetry catches its own send failures.
    if (packet.flags.tc) {
      this.#sendTruncatedRetry()
    }

    // Merge all record sections — answers and additionals always contain
    // relevant records, and some implementations also place records in the
    // authority section. Including all three is safe and maximizes
    // interoperability with non-standard advertisers.
    const allRecords = [...packet.answers, ...packet.authorities, ...packet.additionals]

    // First pass: find PTR records pointing to service instances
    for (const record of allRecords) {
      if (record.type !== RecordType.PTR) continue
      if (!this.#isRelevantPtrRecord(record)) continue

      // PTR rdata is the instance FQDN, e.g. "My Printer._ipp._tcp.local"
      const serviceFqdn = /** @type {string} */ (record.data)

      if (record.ttl === 0) {
        // Goodbye packet — schedule removal after 1 second (RFC 6762 §10.1)
        this.#scheduleGoodbye(serviceFqdn)
        continue
      }

      // A re-announcement cancels any pending goodbye for this service
      this.#cancelGoodbye(serviceFqdn)

      // Clear POOF tracking — we got a response for this record (RFC 6762 §10.5)
      this.#clearPoofTracking(serviceFqdn)

      // A fresh response cancels any pending reconfirmation (RFC 6762 §10.4)
      this.#cancelReconfirm(serviceFqdn)

      // Track for known-answer suppression (bounded by MAX_SERVICES so a
      // flood of unique names cannot grow this map without limit)
      if (this.#knownPtrRecords.size < MAX_SERVICES || this.#knownPtrRecords.has(serviceFqdn)) {
        this.#knownPtrRecords.set(serviceFqdn, {
          ttl: record.ttl,
          receivedAt: Date.now(),
        })
        this.#scheduleTtlCheck()
      }

      // Look for SRV, TXT, A, AAAA records for this instance
      this.#resolveService(serviceFqdn, allRecords)
    }

    // Also handle standalone SRV/TXT/A/AAAA updates for already-known services.
    // Note: if #resolveService already processed a TXT/address update above (via PTR path),
    // the service in the map is already updated, so #handleRecordUpdates will see no diff
    // and will not double-emit a serviceUpdated event.
    this.#handleRecordUpdates(allRecords)

    // Populate subtypes from subtype PTR records (RFC 6763 §7.1).
    // These have the form _subtype._sub._type._proto.domain → instance FQDN.
    this.#handleSubtypeRecords(allRecords)
  }
|
|
918
|
+
|
|
919
|
+
/**
|
|
920
|
+
* Check if a PTR record is relevant to this browser.
|
|
921
|
+
* @param {DnsRecord} record
|
|
922
|
+
* @returns {boolean}
|
|
923
|
+
*/
|
|
924
|
+
#isRelevantPtrRecord(record) {
|
|
925
|
+
const name = record.name.toLowerCase()
|
|
926
|
+
const queryName = this.#queryName.toLowerCase()
|
|
927
|
+
return name === queryName
|
|
928
|
+
}
|
|
929
|
+
|
|
930
|
+
  /**
   * Build or update a service from a set of records.
   *
   * Combines the PTR target with its SRV (host/port), TXT (metadata) and
   * A/AAAA (addresses) records, merging with any previously known state,
   * then emits serviceUp / serviceUpdated as appropriate. Duplicate
   * responses that change nothing emit no event.
   * @param {string} serviceFqdn - e.g. "My Service._http._tcp.local"
   * @param {DnsRecord[]} records - all records from the current packet
   */
  #resolveService(serviceFqdn, records) {
    const existing = this.services.get(serviceFqdn)

    // Guard against resource exhaustion: cap the number of tracked services.
    // An attacker could flood the network with unique service names.
    if (!existing && this.services.size >= MAX_SERVICES) {
      return
    }

    // Find SRV record for this service (DNS names are case-insensitive per RFC 1035 §3.1)
    const fqdnLower = serviceFqdn.toLowerCase()
    const srvRecord = records.find(
      (r) => r.type === RecordType.SRV && r.name.toLowerCase() === fqdnLower
    )

    // Find TXT record for this service
    const txtRecord = records.find(
      (r) => r.type === RecordType.TXT && r.name.toLowerCase() === fqdnLower
    )

    if (!srvRecord && !existing) {
      if (this.#isTypeEnumeration) {
        // Service type enumeration: PTR data is a service type, not an instance.
        // No SRV/TXT/A records are expected — emit immediately.
        const service = /** @type {Service} */ ({
          name: serviceFqdn,
          type: serviceFqdn,
          protocol: '',
          domain: this.#domain,
          host: '',
          port: 0,
          addresses: [],
          txt: {},
          txtRaw: {},
          fqdn: serviceFqdn,
          subtypes: [],
          updatedAt: Date.now(),
        })
        this.services.set(serviceFqdn, service)
        this.#emit({ type: 'serviceUp', service })
        return
      }
      // Can't resolve without SRV — track as pending for when SRV arrives
      // in a separate packet (common with split responses from some advertisers)
      this.#pendingFqdns.add(serviceFqdn)
      return
    }

    // Prefer a fresh SRV record; otherwise reuse the known host/port so a
    // TXT/address-only packet can still update an existing service.
    const srvData = srvRecord
      ? /** @type {import('./dns.js').SrvData} */ (srvRecord.data)
      : existing
        ? { target: existing.host, port: existing.port, priority: 0, weight: 0 }
        : null

    if (!srvData) return

    // Parse TXT data (empty list when the packet carried no TXT record)
    const txtData = txtRecord
      ? /** @type {Uint8Array[]} */ (txtRecord.data)
      : []
    const { txt, txtRaw } = parseTxtData(txtData)

    // Find A/AAAA records for the target host.
    // If any address record has the cache-flush bit set (RFC 6762 §10.2),
    // the new addresses replace rather than merge with existing ones.
    const targetHost = srvData.target
    const { addresses, cacheFlush: addrFlush } = this.#collectAddresses(targetHost, records)

    // RFC 6762 §10.2: cache-flush bit means the sender is asserting this is the
    // complete set of records — old cached addresses received more than 1 second
    // ago should be flushed, but addresses received within the last 1 second are
    // kept to allow multi-packet announcement bursts.
    /* c8 ignore next 5 -- merging old addresses without flush requires multi-response scenario */
    if (existing && !addrFlush) {
      // No flush bit: union the new addresses with everything we already had.
      for (const addr of existing.addresses) {
        if (!addresses.includes(addr)) {
          addresses.push(addr)
        }
      }
    } else if (existing && addrFlush) {
      // Flush bit: keep only addresses inside the 1-second grace window.
      const now = Date.now()
      const addrTimestamps = this.#addressTimestamps.get(serviceFqdn)
      if (addrTimestamps) {
        for (const addr of existing.addresses) {
          if (addresses.includes(addr)) continue
          const receivedAt = addrTimestamps.get(addr)
          // Keep addresses received within the last 1 second (grace period)
          if (receivedAt !== undefined && (now - receivedAt) <= 1000) {
            addresses.push(addr)
          }
        }
      }
    }

    // Update per-address timestamps
    this.#updateAddressTimestamps(serviceFqdn, addresses)

    const instanceName = extractInstanceName(serviceFqdn, `${this.#serviceType}.${this.#domain}`)

    /** @type {Service} */
    const service = {
      name: instanceName,
      type: this.#serviceType,
      protocol: this.#protocol,
      domain: this.#domain,
      host: targetHost,
      port: srvData.port,
      addresses,
      // Keep prior TXT data when this packet carried no TXT record.
      txt: existing && !txtRecord ? existing.txt : txt,
      txtRaw: existing && !txtRecord ? existing.txtRaw : txtRaw,
      fqdn: serviceFqdn,
      subtypes: existing?.subtypes ?? [],
      updatedAt: Date.now(),
    }

    if (existing) {
      // Check if anything actually changed
      const txtChanged = txtRecord && !txtEqual(existing.txt, service.txt)
      const addrChanged = !arrayEqual(existing.addresses, service.addresses)
      const hostChanged = existing.host !== service.host
      const portChanged = existing.port !== service.port

      if (txtChanged || addrChanged || hostChanged || portChanged) {
        this.services.set(serviceFqdn, service)
        this.#emit({ type: 'serviceUpdated', service })
      }
      // If nothing changed, don't emit (duplicate suppression)
    } else {
      this.#pendingFqdns.delete(serviceFqdn)
      this.services.set(serviceFqdn, service)
      this.#emit({ type: 'serviceUp', service })
    }
  }
|
|
1068
|
+
|
|
1069
|
+
/**
|
|
1070
|
+
* Handle updates to records for already-known services.
|
|
1071
|
+
* This catches TXT updates and address changes that arrive without PTR records.
|
|
1072
|
+
* @param {DnsRecord[]} records
|
|
1073
|
+
*/
|
|
1074
|
+
#handleRecordUpdates(records) {
|
|
1075
|
+
for (const [fqdn, service] of this.services) {
|
|
1076
|
+
// Check for TXT updates (DNS names are case-insensitive per RFC 1035 §3.1)
|
|
1077
|
+
const fqdnLower = fqdn.toLowerCase()
|
|
1078
|
+
const txtRecord = records.find(
|
|
1079
|
+
(r) => r.type === RecordType.TXT && r.name.toLowerCase() === fqdnLower
|
|
1080
|
+
)
|
|
1081
|
+
if (txtRecord) {
|
|
1082
|
+
const txtData = /** @type {Uint8Array[]} */ (txtRecord.data)
|
|
1083
|
+
const { txt, txtRaw } = parseTxtData(txtData)
|
|
1084
|
+
|
|
1085
|
+
if (txtRecord.ttl === 0) {
|
|
1086
|
+
// TXT goodbye — unusual but handle it
|
|
1087
|
+
continue
|
|
1088
|
+
}
|
|
1089
|
+
|
|
1090
|
+
if (!txtEqual(service.txt, txt)) {
|
|
1091
|
+
const updatedService = { ...service, txt, txtRaw, updatedAt: Date.now() }
|
|
1092
|
+
this.services.set(fqdn, updatedService)
|
|
1093
|
+
this.#emit({ type: 'serviceUpdated', service: updatedService })
|
|
1094
|
+
}
|
|
1095
|
+
}
|
|
1096
|
+
|
|
1097
|
+
// Check for address updates.
|
|
1098
|
+
// Cache-flush bit (RFC 6762 §10.2) means flush addresses received more than
|
|
1099
|
+
// 1 second ago, but keep those within the grace period.
|
|
1100
|
+
const { addresses: newAddresses, cacheFlush: addrFlush } = this.#collectAddresses(service.host, records)
|
|
1101
|
+
if (newAddresses.length > 0) {
|
|
1102
|
+
let finalAddresses
|
|
1103
|
+
if (addrFlush) {
|
|
1104
|
+
finalAddresses = [...newAddresses]
|
|
1105
|
+
const now = Date.now()
|
|
1106
|
+
const addrTimestamps = this.#addressTimestamps.get(fqdn)
|
|
1107
|
+
if (addrTimestamps) {
|
|
1108
|
+
for (const addr of service.addresses) {
|
|
1109
|
+
if (finalAddresses.includes(addr)) continue
|
|
1110
|
+
const receivedAt = addrTimestamps.get(addr)
|
|
1111
|
+
if (receivedAt !== undefined && (now - receivedAt) <= 1000) {
|
|
1112
|
+
finalAddresses.push(addr)
|
|
1113
|
+
}
|
|
1114
|
+
}
|
|
1115
|
+
}
|
|
1116
|
+
} else {
|
|
1117
|
+
finalAddresses = [...new Set([...service.addresses, ...newAddresses])]
|
|
1118
|
+
}
|
|
1119
|
+
if (!arrayEqual(service.addresses, finalAddresses)) {
|
|
1120
|
+
this.#updateAddressTimestamps(fqdn, finalAddresses)
|
|
1121
|
+
const updatedService = { ...service, addresses: finalAddresses, updatedAt: Date.now() }
|
|
1122
|
+
this.services.set(fqdn, updatedService)
|
|
1123
|
+
this.#emit({ type: 'serviceUpdated', service: updatedService })
|
|
1124
|
+
}
|
|
1125
|
+
}
|
|
1126
|
+
|
|
1127
|
+
// Check for SRV goodbye
|
|
1128
|
+
const srvRecord = records.find(
|
|
1129
|
+
(r) => r.type === RecordType.SRV && r.name.toLowerCase() === fqdnLower && r.ttl === 0
|
|
1130
|
+
)
|
|
1131
|
+
if (srvRecord) {
|
|
1132
|
+
this.#scheduleGoodbye(fqdn)
|
|
1133
|
+
}
|
|
1134
|
+
}
|
|
1135
|
+
|
|
1136
|
+
// Check for SRV records that resolve previously-pending FQDNs.
|
|
1137
|
+
// This handles split responses where PTR arrived in one packet and
|
|
1138
|
+
// SRV arrives later in a separate packet without a PTR re-announcement.
|
|
1139
|
+
if (this.#pendingFqdns.size > 0) {
|
|
1140
|
+
for (const record of records) {
|
|
1141
|
+
if (record.type !== RecordType.SRV) continue
|
|
1142
|
+
const srvNameLower = record.name.toLowerCase()
|
|
1143
|
+
for (const fqdn of this.#pendingFqdns) {
|
|
1144
|
+
if (fqdn.toLowerCase() === srvNameLower) {
|
|
1145
|
+
this.#resolveService(fqdn, records)
|
|
1146
|
+
break
|
|
1147
|
+
}
|
|
1148
|
+
}
|
|
1149
|
+
}
|
|
1150
|
+
}
|
|
1151
|
+
}
|
|
1152
|
+
|
|
1153
|
+
/**
|
|
1154
|
+
* Extract subtypes from subtype PTR records and add them to known services.
|
|
1155
|
+
* Subtype PTR records have the form: _subtype._sub._type._proto.domain → instance FQDN.
|
|
1156
|
+
* @param {DnsRecord[]} records
|
|
1157
|
+
*/
|
|
1158
|
+
#handleSubtypeRecords(records) {
|
|
1159
|
+
for (const record of records) {
|
|
1160
|
+
if (record.type !== RecordType.PTR) continue
|
|
1161
|
+
const nameLower = record.name.toLowerCase()
|
|
1162
|
+
|
|
1163
|
+
// Check for _sub. pattern in the PTR name
|
|
1164
|
+
const subIdx = nameLower.indexOf('._sub.')
|
|
1165
|
+
if (subIdx === -1) continue
|
|
1166
|
+
|
|
1167
|
+
const subtype = record.name.slice(0, subIdx)
|
|
1168
|
+
const serviceFqdn = /** @type {string} */ (record.data)
|
|
1169
|
+
const service = this.services.get(serviceFqdn)
|
|
1170
|
+
if (service && !service.subtypes.includes(subtype)) {
|
|
1171
|
+
const updatedService = {
|
|
1172
|
+
...service,
|
|
1173
|
+
subtypes: [...service.subtypes, subtype],
|
|
1174
|
+
updatedAt: Date.now(),
|
|
1175
|
+
}
|
|
1176
|
+
this.services.set(serviceFqdn, updatedService)
|
|
1177
|
+
this.#emit({ type: 'serviceUpdated', service: updatedService })
|
|
1178
|
+
}
|
|
1179
|
+
}
|
|
1180
|
+
}
|
|
1181
|
+
|
|
1182
|
+
/**
|
|
1183
|
+
* Collect A and AAAA addresses for a hostname from a set of records.
|
|
1184
|
+
* Also reports whether any address record had the cache-flush bit set
|
|
1185
|
+
* (RFC 6762 §10.2), which means the sender is asserting these are the
|
|
1186
|
+
* complete set and old cached addresses should be replaced, not merged.
|
|
1187
|
+
* @param {string} hostname
|
|
1188
|
+
* @param {DnsRecord[]} records
|
|
1189
|
+
* @returns {{ addresses: string[], cacheFlush: boolean }}
|
|
1190
|
+
*/
|
|
1191
|
+
#collectAddresses(hostname, records) {
|
|
1192
|
+
/** @type {string[]} */
|
|
1193
|
+
const addresses = []
|
|
1194
|
+
const hostLower = hostname.toLowerCase()
|
|
1195
|
+
let cacheFlush = false
|
|
1196
|
+
for (const record of records) {
|
|
1197
|
+
if (record.name.toLowerCase() !== hostLower) continue
|
|
1198
|
+
if (record.type === RecordType.A || record.type === RecordType.AAAA) {
|
|
1199
|
+
const addr = /** @type {string} */ (record.data)
|
|
1200
|
+
if (!addresses.includes(addr)) {
|
|
1201
|
+
addresses.push(addr)
|
|
1202
|
+
}
|
|
1203
|
+
if (record.cacheFlush) cacheFlush = true
|
|
1204
|
+
}
|
|
1205
|
+
}
|
|
1206
|
+
return { addresses, cacheFlush }
|
|
1207
|
+
}
|
|
1208
|
+
|
|
1209
|
+
/**
|
|
1210
|
+
* Schedule a service for removal after 1 second (RFC 6762 §10.1).
|
|
1211
|
+
* If a re-announcement arrives within that window, the goodbye is cancelled.
|
|
1212
|
+
* @param {string} serviceFqdn
|
|
1213
|
+
*/
|
|
1214
|
+
#scheduleGoodbye(serviceFqdn) {
|
|
1215
|
+
// Don't schedule if already pending or service doesn't exist
|
|
1216
|
+
if (this.#pendingGoodbyes.has(serviceFqdn)) return
|
|
1217
|
+
if (!this.services.has(serviceFqdn)) return
|
|
1218
|
+
|
|
1219
|
+
const timer = setTimeout(() => {
|
|
1220
|
+
this.#pendingGoodbyes.delete(serviceFqdn)
|
|
1221
|
+
this.#removeService(serviceFqdn)
|
|
1222
|
+
}, 1000)
|
|
1223
|
+
|
|
1224
|
+
this.#pendingGoodbyes.set(serviceFqdn, timer)
|
|
1225
|
+
}
|
|
1226
|
+
|
|
1227
|
+
/**
|
|
1228
|
+
* Cancel a pending goodbye for a service (e.g. when a re-announcement arrives).
|
|
1229
|
+
* @param {string} serviceFqdn
|
|
1230
|
+
*/
|
|
1231
|
+
#cancelGoodbye(serviceFqdn) {
|
|
1232
|
+
const timer = this.#pendingGoodbyes.get(serviceFqdn)
|
|
1233
|
+
if (timer) {
|
|
1234
|
+
clearTimeout(timer)
|
|
1235
|
+
this.#pendingGoodbyes.delete(serviceFqdn)
|
|
1236
|
+
}
|
|
1237
|
+
}
|
|
1238
|
+
|
|
1239
|
+
/**
|
|
1240
|
+
* Update per-address receive timestamps for a service.
|
|
1241
|
+
* New addresses get the current time; existing timestamps are preserved.
|
|
1242
|
+
* @param {string} serviceFqdn
|
|
1243
|
+
* @param {string[]} addresses
|
|
1244
|
+
*/
|
|
1245
|
+
#updateAddressTimestamps(serviceFqdn, addresses) {
|
|
1246
|
+
const now = Date.now()
|
|
1247
|
+
let addrTimestamps = this.#addressTimestamps.get(serviceFqdn)
|
|
1248
|
+
if (!addrTimestamps) {
|
|
1249
|
+
addrTimestamps = new Map()
|
|
1250
|
+
this.#addressTimestamps.set(serviceFqdn, addrTimestamps)
|
|
1251
|
+
}
|
|
1252
|
+
for (const addr of addresses) {
|
|
1253
|
+
if (!addrTimestamps.has(addr)) {
|
|
1254
|
+
addrTimestamps.set(addr, now)
|
|
1255
|
+
}
|
|
1256
|
+
}
|
|
1257
|
+
// Remove timestamps for addresses no longer present
|
|
1258
|
+
for (const addr of addrTimestamps.keys()) {
|
|
1259
|
+
if (!addresses.includes(addr)) {
|
|
1260
|
+
addrTimestamps.delete(addr)
|
|
1261
|
+
}
|
|
1262
|
+
}
|
|
1263
|
+
}
|
|
1264
|
+
|
|
1265
|
+
/**
|
|
1266
|
+
* Remove a service and emit serviceDown.
|
|
1267
|
+
* @param {string} serviceFqdn
|
|
1268
|
+
*/
|
|
1269
|
+
#removeService(serviceFqdn) {
|
|
1270
|
+
this.#cancelGoodbye(serviceFqdn)
|
|
1271
|
+
this.#cancelReconfirm(serviceFqdn)
|
|
1272
|
+
this.#clearPoofTracking(serviceFqdn)
|
|
1273
|
+
// Always clean up tracking state, even for unresolved (pending) services.
|
|
1274
|
+
// Without this, expired PTR records for services that never received an SRV
|
|
1275
|
+
// would stay in #knownPtrRecords/#pendingFqdns and cause repeated failed
|
|
1276
|
+
// removal attempts from #processRecordLifecycle.
|
|
1277
|
+
this.#knownPtrRecords.delete(serviceFqdn)
|
|
1278
|
+
this.#pendingFqdns.delete(serviceFqdn)
|
|
1279
|
+
const service = this.services.get(serviceFqdn)
|
|
1280
|
+
if (service) {
|
|
1281
|
+
this.services.delete(serviceFqdn)
|
|
1282
|
+
this.#addressTimestamps.delete(serviceFqdn)
|
|
1283
|
+
this.#emit({ type: 'serviceDown', service })
|
|
1284
|
+
}
|
|
1285
|
+
}
|
|
1286
|
+
|
|
1287
|
+
/**
|
|
1288
|
+
* Push an event to the buffer and wake any waiting consumer.
|
|
1289
|
+
* @param {BrowseEvent} event
|
|
1290
|
+
*/
|
|
1291
|
+
#emit(event) {
|
|
1292
|
+
// Cap event buffer to prevent memory exhaustion when events aren't consumed
|
|
1293
|
+
/* c8 ignore next 3 -- overflow guard requires 1000+ unconsumed events */
|
|
1294
|
+
if (this.#eventBuffer.length >= MAX_EVENT_BUFFER) {
|
|
1295
|
+
this.#eventBuffer.shift()
|
|
1296
|
+
}
|
|
1297
|
+
this.#eventBuffer.push(event)
|
|
1298
|
+
if (this.#eventSignal) {
|
|
1299
|
+
this.#eventSignal.resolve()
|
|
1300
|
+
}
|
|
1301
|
+
}
|
|
1302
|
+
}
|
|
1303
|
+
|
|
1304
|
+
/**
 * Shallow-compare two parsed TXT objects: same key count and strictly equal
 * value for every key.
 * @param {Record<string, string | true>} a
 * @param {Record<string, string | true>} b
 * @returns {boolean}
 */
function txtEqual(a, b) {
  const entriesA = Object.entries(a)
  if (entriesA.length !== Object.keys(b).length) return false
  return entriesA.every(([key, value]) => b[key] === value)
}
|
|
1316
|
+
|
|
1317
|
+
/**
 * Order-sensitive equality of two string arrays.
 * @param {string[]} a
 * @param {string[]} b
 * @returns {boolean}
 */
function arrayEqual(a, b) {
  if (a.length !== b.length) return false
  for (let i = 0; i < a.length; i += 1) {
    if (a[i] !== b[i]) return false
  }
  return true
}
|
|
1327
|
+
|
|
1328
|
+
// ─── AllServiceBrowser ─────────────────────────────────────────────────
|
|
1329
|
+
|
|
1330
|
+
/**
 * Maximum number of buffered events for AllServiceBrowser. When the buffer
 * is full, #emit drops the oldest event before pushing the new one.
 */
const MAX_ALL_EVENT_BUFFER = 4096
|
|
1334
|
+
|
|
1335
|
+
/**
 * AllServiceBrowser — discovers all service instances on the network by
 * first enumerating service types (RFC 6763 §9), then spawning a
 * ServiceBrowser for each discovered type.
 *
 * Presents the same async iterable interface as ServiceBrowser, yielding
 * fully resolved BrowseEvents with real host, port, and addresses.
 */
export class AllServiceBrowser {
  // Shared mDNS transport; destroy() never closes it.
  /** @type {MdnsTransport} */
  #transport
  // Browse domain (e.g. "local"), passed to every sub-browser.
  #domain
  // Set once by destroy(); guards every public entry point.
  #destroyed = false

  /**
   * The internal type enumeration browser.
   * @type {ServiceBrowser | null}
   */
  #typeBrowser = null

  /**
   * Sub-browsers for each discovered service type, keyed by type string
   * (e.g. "_http._tcp.local").
   * @type {Map<string, ServiceBrowser>}
   */
  #typeBrowsers = new Map()

  /**
   * Merged live map of all discovered service instances across all types.
   * @type {Map<string, Service>}
   */
  services = new Map()

  // Events queued for the single async-iterator consumer (capped at
  // MAX_ALL_EVENT_BUFFER in #emit).
  /** @type {BrowseEvent[]} */
  #eventBuffer = []
  // Deferred the iterator parks on while the buffer is empty.
  /** @type {Deferred<void> | null} */
  #eventSignal = null
  // True while an async iterator is active (only one is allowed).
  /** @type {boolean} */
  #iterating = false

  /** @type {AbortSignal | undefined} */
  #signal
  // Abort listener registered on #signal; removed in destroy().
  /** @type {(() => void) | null} */
  #abortHandler = null
  /**
   * If set, the iterator throws this instead of returning done: true,
   * matching the Node.js convention (events.on, Readable, setInterval).
   * @type {any}
   */
  #destroyReason = undefined
  // Parent notification hook, fired once (via queueMicrotask) on destroy.
  /** @type {(() => void) | null} */
  #onDestroy = null
|
|
1387
|
+
|
|
1388
|
+
/**
|
|
1389
|
+
* @param {MdnsTransport} transport
|
|
1390
|
+
* @param {object} options
|
|
1391
|
+
* @param {string} options.domain
|
|
1392
|
+
* @param {AbortSignal} [options.signal]
|
|
1393
|
+
* @param {() => void} [options.onDestroy]
|
|
1394
|
+
*/
|
|
1395
|
+
constructor(transport, { domain, signal, onDestroy }) {
|
|
1396
|
+
this.#transport = transport
|
|
1397
|
+
this.#domain = domain
|
|
1398
|
+
this.#signal = signal
|
|
1399
|
+
this.#onDestroy = onDestroy ?? null
|
|
1400
|
+
|
|
1401
|
+
// Start the type enumeration browser
|
|
1402
|
+
this.#typeBrowser = new ServiceBrowser(transport, {
|
|
1403
|
+
queryName: SERVICE_TYPE_ENUMERATION,
|
|
1404
|
+
serviceType: '_services._dns-sd._udp',
|
|
1405
|
+
domain,
|
|
1406
|
+
protocol: 'udp',
|
|
1407
|
+
isTypeEnumeration: true,
|
|
1408
|
+
})
|
|
1409
|
+
|
|
1410
|
+
// Consume type events in the background
|
|
1411
|
+
this.#consumeTypes()
|
|
1412
|
+
|
|
1413
|
+
// Handle AbortSignal — the abort reason is forwarded to destroy() so the
|
|
1414
|
+
// iterator throws it (matching Node.js convention: events.on, Readable, setInterval).
|
|
1415
|
+
if (signal) {
|
|
1416
|
+
if (signal.aborted) {
|
|
1417
|
+
this.destroy(signal.reason)
|
|
1418
|
+
} else {
|
|
1419
|
+
this.#abortHandler = () => this.destroy(signal.reason)
|
|
1420
|
+
signal.addEventListener('abort', this.#abortHandler, { once: true })
|
|
1421
|
+
}
|
|
1422
|
+
}
|
|
1423
|
+
}
|
|
1424
|
+
|
|
1425
|
+
/**
|
|
1426
|
+
* Get the first discovered service. Resolves as soon as any service
|
|
1427
|
+
* instance emits a serviceUp event.
|
|
1428
|
+
* @returns {Promise<Service>}
|
|
1429
|
+
*/
|
|
1430
|
+
async first() {
|
|
1431
|
+
for await (const event of this) {
|
|
1432
|
+
if (event.type === 'serviceUp') {
|
|
1433
|
+
return event.service
|
|
1434
|
+
}
|
|
1435
|
+
}
|
|
1436
|
+
throw new Error('Browser destroyed before finding a service')
|
|
1437
|
+
}
|
|
1438
|
+
|
|
1439
|
+
/**
|
|
1440
|
+
* Flush all discovered services and restart querying from scratch.
|
|
1441
|
+
*
|
|
1442
|
+
* Call this after a network interface change (e.g. WiFi reconnect).
|
|
1443
|
+
* Delegates to each internal ServiceBrowser and clears the merged
|
|
1444
|
+
* services map.
|
|
1445
|
+
*/
|
|
1446
|
+
/* c8 ignore next 11 -- AllServiceBrowser.resetNetwork requires active type browsers */
|
|
1447
|
+
resetNetwork() {
|
|
1448
|
+
if (this.#destroyed) return
|
|
1449
|
+
|
|
1450
|
+
if (this.#typeBrowser) {
|
|
1451
|
+
this.#typeBrowser.resetNetwork()
|
|
1452
|
+
}
|
|
1453
|
+
for (const browser of this.#typeBrowsers.values()) {
|
|
1454
|
+
browser.resetNetwork()
|
|
1455
|
+
}
|
|
1456
|
+
this.services.clear()
|
|
1457
|
+
}
|
|
1458
|
+
|
|
1459
|
+
/**
|
|
1460
|
+
* Stop all sub-browsers and end the async iterator.
|
|
1461
|
+
* @param {any} [reason] - If provided, the iterator throws this instead of
|
|
1462
|
+
* returning done: true (matching Node.js convention for AbortSignal).
|
|
1463
|
+
*/
|
|
1464
|
+
destroy(reason) {
|
|
1465
|
+
if (this.#destroyed) return
|
|
1466
|
+
this.#destroyed = true
|
|
1467
|
+
if (reason !== undefined) this.#destroyReason = reason
|
|
1468
|
+
|
|
1469
|
+
// Destroy the type enumeration browser
|
|
1470
|
+
if (this.#typeBrowser) {
|
|
1471
|
+
this.#typeBrowser.destroy()
|
|
1472
|
+
this.#typeBrowser = null
|
|
1473
|
+
}
|
|
1474
|
+
|
|
1475
|
+
// Destroy all sub-browsers
|
|
1476
|
+
for (const browser of this.#typeBrowsers.values()) {
|
|
1477
|
+
browser.destroy()
|
|
1478
|
+
}
|
|
1479
|
+
this.#typeBrowsers.clear()
|
|
1480
|
+
|
|
1481
|
+
/* c8 ignore next 4 -- abort handler cleanup mirrors ServiceBrowser */
|
|
1482
|
+
if (this.#abortHandler && this.#signal) {
|
|
1483
|
+
this.#signal.removeEventListener('abort', this.#abortHandler)
|
|
1484
|
+
this.#abortHandler = null
|
|
1485
|
+
}
|
|
1486
|
+
|
|
1487
|
+
// Notify parent — deferred via queueMicrotask (see ServiceBrowser.destroy).
|
|
1488
|
+
if (this.#onDestroy) {
|
|
1489
|
+
const cb = this.#onDestroy
|
|
1490
|
+
this.#onDestroy = null
|
|
1491
|
+
queueMicrotask(cb)
|
|
1492
|
+
}
|
|
1493
|
+
|
|
1494
|
+
// Wake the iterator so it can observe #destroyed and throw.
|
|
1495
|
+
/* c8 ignore start -- mirrors ServiceBrowser */
|
|
1496
|
+
if (this.#eventSignal) {
|
|
1497
|
+
this.#eventSignal.resolve()
|
|
1498
|
+
}
|
|
1499
|
+
/* c8 ignore stop */
|
|
1500
|
+
}
|
|
1501
|
+
|
|
1502
|
+
  /** @returns {Promise<void>} */
  /* c8 ignore start -- asyncDispose is a standard protocol method */
  // Supports `await using browser = ...` (explicit resource management).
  async [Symbol.asyncDispose]() {
    this.destroy()
  }
  /* c8 ignore stop */
|
|
1508
|
+
|
|
1509
|
+
  /** @returns {AsyncIterableIterator<BrowseEvent>} */
  [Symbol.asyncIterator]() {
    if (this.#destroyed) throw new Error('Browser has been destroyed')
    // Only one consumer may drain the single shared event buffer at a time.
    if (this.#iterating) {
      throw new Error('AllServiceBrowser only supports a single concurrent async iterator')
    }
    this.#iterating = true

    /** @type {AsyncIterableIterator<BrowseEvent>} */
    const iter = {
      // Drain buffered events; when the buffer is empty, park on a fresh
      // deferred until #emit() or destroy() resolves it, then re-check.
      next: async () => {
        while (true) {
          if (this.#eventBuffer.length > 0) {
            const value = /** @type {BrowseEvent} */ (this.#eventBuffer.shift())
            return { value, done: false }
          }
          // Destroyed and no pending events: release the iterator lock and
          // either propagate the abort reason or finish normally.
          if (this.#destroyed) {
            this.#iterating = false
            if (this.#destroyReason !== undefined) {
              throw this.#destroyReason
            }
            return { value: undefined, done: true }
          }
          // Park until the next wake-up (#emit or destroy resolves this).
          this.#eventSignal = createDeferred()
          await this.#eventSignal.promise
          this.#eventSignal = null
        }
      },
      // Invoked on early exit from for-await (break/return/throw): release
      // the single-iterator lock and tear the whole browser down.
      return: async () => {
        this.#iterating = false
        this.destroy()
        return { value: undefined, done: true }
      },
      [Symbol.asyncIterator]() { return iter },
    }
    return iter
  }
|
|
1546
|
+
|
|
1547
|
+
// ─── Private methods ───────────────────────────────────────────────
|
|
1548
|
+
|
|
1549
|
+
/**
|
|
1550
|
+
* Push an event to the buffer and wake any waiting consumer.
|
|
1551
|
+
* @param {BrowseEvent} event
|
|
1552
|
+
*/
|
|
1553
|
+
#emit(event) {
|
|
1554
|
+
/* c8 ignore next 3 -- overflow guard requires 1000+ unconsumed events */
|
|
1555
|
+
if (this.#eventBuffer.length >= MAX_ALL_EVENT_BUFFER) {
|
|
1556
|
+
this.#eventBuffer.shift()
|
|
1557
|
+
}
|
|
1558
|
+
this.#eventBuffer.push(event)
|
|
1559
|
+
if (this.#eventSignal) {
|
|
1560
|
+
this.#eventSignal.resolve()
|
|
1561
|
+
}
|
|
1562
|
+
}
|
|
1563
|
+
|
|
1564
|
+
/**
|
|
1565
|
+
* Consume type enumeration events in the background and spawn/destroy
|
|
1566
|
+
* sub-browsers as types appear and disappear.
|
|
1567
|
+
*/
|
|
1568
|
+
async #consumeTypes() {
|
|
1569
|
+
if (!this.#typeBrowser) return
|
|
1570
|
+
|
|
1571
|
+
/* c8 ignore start -- type enumeration consumption requires browseAll with real network traffic */
|
|
1572
|
+
try {
|
|
1573
|
+
for await (const event of this.#typeBrowser) {
|
|
1574
|
+
if (this.#destroyed) break
|
|
1575
|
+
|
|
1576
|
+
if (event.type === 'serviceUp') {
|
|
1577
|
+
this.#addTypeBrowser(event.service.fqdn)
|
|
1578
|
+
} else if (event.type === 'serviceDown') {
|
|
1579
|
+
this.#removeTypeBrowser(event.service.fqdn)
|
|
1580
|
+
}
|
|
1581
|
+
}
|
|
1582
|
+
} catch {
|
|
1583
|
+
// Browser was destroyed or transport errored — stop consuming
|
|
1584
|
+
}
|
|
1585
|
+
/* c8 ignore stop */
|
|
1586
|
+
}
|
|
1587
|
+
|
|
1588
|
+
/**
|
|
1589
|
+
* Start browsing for instances of a specific service type.
|
|
1590
|
+
* @param {string} typeFqdn - e.g. "_http._tcp.local"
|
|
1591
|
+
*/
|
|
1592
|
+
#addTypeBrowser(typeFqdn) {
|
|
1593
|
+
if (this.#typeBrowsers.has(typeFqdn)) return
|
|
1594
|
+
|
|
1595
|
+
// Parse the type FQDN into components.
|
|
1596
|
+
// typeFqdn is e.g. "_http._tcp.local" → type="_http._tcp", domain="local"
|
|
1597
|
+
// or "_http._tcp.example.com" → type="_http._tcp", domain="example.com"
|
|
1598
|
+
// Service type is always the first two labels (_service._proto).
|
|
1599
|
+
const parts = typeFqdn.split('.')
|
|
1600
|
+
if (parts.length < 3) return
|
|
1601
|
+
|
|
1602
|
+
const type = parts.slice(0, 2).join('.')
|
|
1603
|
+
const domain = parts.slice(2).join('.')
|
|
1604
|
+
const protocol = parts[1].replace(/^_/, '')
|
|
1605
|
+
|
|
1606
|
+
const browser = new ServiceBrowser(this.#transport, {
|
|
1607
|
+
queryName: typeFqdn,
|
|
1608
|
+
serviceType: type,
|
|
1609
|
+
domain,
|
|
1610
|
+
protocol,
|
|
1611
|
+
})
|
|
1612
|
+
|
|
1613
|
+
this.#typeBrowsers.set(typeFqdn, browser)
|
|
1614
|
+
this.#consumeInstanceEvents(typeFqdn, browser)
|
|
1615
|
+
}
|
|
1616
|
+
|
|
1617
|
+
/**
|
|
1618
|
+
* Stop browsing for instances of a service type and remove its services.
|
|
1619
|
+
* @param {string} typeFqdn
|
|
1620
|
+
*/
|
|
1621
|
+
/* c8 ignore next 13 -- requires a service type to disappear from the network */
|
|
1622
|
+
#removeTypeBrowser(typeFqdn) {
|
|
1623
|
+
const browser = this.#typeBrowsers.get(typeFqdn)
|
|
1624
|
+
if (!browser) return
|
|
1625
|
+
|
|
1626
|
+
// Emit serviceDown for all instances of this type
|
|
1627
|
+
for (const [fqdn, service] of browser.services) {
|
|
1628
|
+
this.services.delete(fqdn)
|
|
1629
|
+
this.#emit({ type: 'serviceDown', service })
|
|
1630
|
+
}
|
|
1631
|
+
|
|
1632
|
+
browser.destroy()
|
|
1633
|
+
this.#typeBrowsers.delete(typeFqdn)
|
|
1634
|
+
}
|
|
1635
|
+
|
|
1636
|
+
/**
|
|
1637
|
+
* Forward events from a sub-browser into the unified event stream.
|
|
1638
|
+
* @param {string} typeFqdn
|
|
1639
|
+
* @param {ServiceBrowser} browser
|
|
1640
|
+
*/
|
|
1641
|
+
/* c8 ignore start -- instance event forwarding requires browseAll with real discovery */
|
|
1642
|
+
async #consumeInstanceEvents(typeFqdn, browser) {
|
|
1643
|
+
try {
|
|
1644
|
+
for await (const event of browser) {
|
|
1645
|
+
if (this.#destroyed) break
|
|
1646
|
+
|
|
1647
|
+
// Mirror changes into the merged services map
|
|
1648
|
+
if (event.type === 'serviceUp' || event.type === 'serviceUpdated') {
|
|
1649
|
+
this.services.set(event.service.fqdn, event.service)
|
|
1650
|
+
} else if (event.type === 'serviceDown') {
|
|
1651
|
+
this.services.delete(event.service.fqdn)
|
|
1652
|
+
}
|
|
1653
|
+
|
|
1654
|
+
this.#emit(event)
|
|
1655
|
+
}
|
|
1656
|
+
} catch {
|
|
1657
|
+
// Browser was destroyed or transport errored — stop consuming
|
|
1658
|
+
}
|
|
1659
|
+
}
|
|
1660
|
+
/* c8 ignore stop */
|
|
1661
|
+
}
|