@matter/protocol 0.12.3 → 0.12.4-alpha.0-20250210-ad8edf096

This diff reflects the changes between the two publicly released package versions as they appear in their public registry.
Files changed (26)
  1. package/dist/cjs/interaction/AttributeDataEncoder.d.ts +11 -8
  2. package/dist/cjs/interaction/AttributeDataEncoder.d.ts.map +1 -1
  3. package/dist/cjs/interaction/AttributeDataEncoder.js +11 -0
  4. package/dist/cjs/interaction/AttributeDataEncoder.js.map +1 -1
  5. package/dist/cjs/interaction/InteractionMessenger.d.ts +1 -1
  6. package/dist/cjs/interaction/InteractionMessenger.d.ts.map +1 -1
  7. package/dist/cjs/interaction/InteractionMessenger.js +155 -41
  8. package/dist/cjs/interaction/InteractionMessenger.js.map +1 -1
  9. package/dist/cjs/peer/ControllerCommissioningFlow.d.ts.map +1 -1
  10. package/dist/cjs/peer/ControllerCommissioningFlow.js +1 -5
  11. package/dist/cjs/peer/ControllerCommissioningFlow.js.map +1 -1
  12. package/dist/esm/interaction/AttributeDataEncoder.d.ts +11 -8
  13. package/dist/esm/interaction/AttributeDataEncoder.d.ts.map +1 -1
  14. package/dist/esm/interaction/AttributeDataEncoder.js +11 -0
  15. package/dist/esm/interaction/AttributeDataEncoder.js.map +1 -1
  16. package/dist/esm/interaction/InteractionMessenger.d.ts +1 -1
  17. package/dist/esm/interaction/InteractionMessenger.d.ts.map +1 -1
  18. package/dist/esm/interaction/InteractionMessenger.js +156 -41
  19. package/dist/esm/interaction/InteractionMessenger.js.map +1 -1
  20. package/dist/esm/peer/ControllerCommissioningFlow.d.ts.map +1 -1
  21. package/dist/esm/peer/ControllerCommissioningFlow.js +1 -5
  22. package/dist/esm/peer/ControllerCommissioningFlow.js.map +1 -1
  23. package/package.json +6 -6
  24. package/src/interaction/AttributeDataEncoder.ts +23 -8
  25. package/src/interaction/InteractionMessenger.ts +266 -49
  26. package/src/peer/ControllerCommissioningFlow.ts +2 -5
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@matter/protocol",
-  "version": "0.12.3",
+  "version": "0.12.4-alpha.0-20250210-ad8edf096",
   "description": "Low-level APIs for Matter interaction",
   "keywords": [
     "iot",
@@ -40,14 +40,14 @@
     "#*": "./src/*"
   },
   "dependencies": {
-    "@matter/general": "0.12.3",
-    "@matter/model": "0.12.3",
-    "@matter/types": "0.12.3",
+    "@matter/general": "0.12.4-alpha.0-20250210-ad8edf096",
+    "@matter/model": "0.12.4-alpha.0-20250210-ad8edf096",
+    "@matter/types": "0.12.4-alpha.0-20250210-ad8edf096",
     "@noble/curves": "^1.8.1"
   },
   "devDependencies": {
-    "@matter/tools": "0.12.3",
-    "@matter/testing": "0.12.3"
+    "@matter/tools": "0.12.4-alpha.0-20250210-ad8edf096",
+    "@matter/testing": "0.12.4-alpha.0-20250210-ad8edf096"
   },
   "files": [
     "dist/**/*",
package/src/interaction/AttributeDataEncoder.ts CHANGED
@@ -30,14 +30,20 @@ type FullAttributePath = {
     dataVersion?: number;
 };
 
+/** Type for TlvAttributeReportData where the real data are represented with the schema and the JS value. */
+type AttributeDataPayload = Omit<TypeFromSchema<typeof TlvAttributeReportData>, "data"> & {
+    schema: TlvSchema<any>;
+    payload: any;
+};
+
 /** Type for TlvAttributeReport where the real data are represented with the schema and the JS value. */
 export type AttributeReportPayload = Omit<TypeFromSchema<typeof TlvAttributeReport>, "attributeData"> & {
     attributeData?: AttributeDataPayload;
     hasFabricSensitiveData: boolean;
 };
 
-/** Type for TlvAttributeReportData where the real data are represented with the schema and the JS value. */
-type AttributeDataPayload = Omit<TypeFromSchema<typeof TlvAttributeReportData>, "data"> & {
+/** Type for TlvEventData where the real data are represented with the schema and the JS value. */
+export type EventDataPayload = Omit<TypeFromSchema<typeof TlvEventData>, "data"> & {
     schema: TlvSchema<any>;
     payload: any;
 };
@@ -48,12 +54,6 @@ export type EventReportPayload = Omit<TypeFromSchema<typeof TlvEventReport>, "ev
     hasFabricSensitiveData: boolean;
 };
 
-/** Type for TlvEventData where the real data are represented with the schema and the JS value. */
-export type EventDataPayload = Omit<TypeFromSchema<typeof TlvEventData>, "data"> & {
-    schema: TlvSchema<any>;
-    payload: any;
-};
-
 export type EventOrAttributeDataPayload = AttributeReportPayload | EventReportPayload;
 
 /** A base type for a DataReport which removes the fields of the actual attribute or event content */
@@ -70,6 +70,21 @@ export type DataReportPayload = BaseDataReport & {
  */
 export type DataReportPayloadIterator = IterableIterator<EventOrAttributeDataPayload>;
 
+export function encodeAttributePayloadData(
+    attributePayload: AttributeReportPayload,
+    options?: TlvEncodingOptions,
+): TlvStream {
+    const { attributeData } = attributePayload;
+    if (attributeData === undefined) {
+        throw new MatterFlowError(
+            `Cannot encode Attribute Payload data with just a attributeStatus: ${Logger.toJSON(attributePayload)}`,
+        );
+    }
+
+    const { schema, payload } = attributeData;
+    return schema.encodeTlv(payload, options);
+}
+
 /** Encodes an AttributeReportPayload into a TlvStream (used for TlvAny type). */
 export function encodeAttributePayload(
     attributePayload: AttributeReportPayload,
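The newly exported encodeAttributePayloadData encodes only the data portion of an attribute report (the schema plus its JS value), whereas the existing encodeAttributePayload wraps it in a complete TlvAttributeReport structure. The messenger changes below use the data-only variant when appending additional list entries to a report that is already part of the outgoing DataReport. A minimal sketch of how the two relate, assuming a report value is already at hand (the import specifiers are illustrative and not taken from this diff):

    import { TlvAny } from "@matter/types"; // illustrative import specifier
    import {
        AttributeReportPayload,
        encodeAttributePayload,
        encodeAttributePayloadData,
    } from "./AttributeDataEncoder.js";

    /** Computes the encoded sizes the messenger works with (sketch only). */
    function encodedSizes(report: AttributeReportPayload) {
        const fullReport = encodeAttributePayload(report); // complete AttributeReport TLV
        const dataOnly = encodeAttributePayloadData(report); // just schema.encodeTlv(payload); throws if only attributeStatus is set
        return {
            full: TlvAny.getEncodedByteLength(fullReport),
            dataOnly: TlvAny.getEncodedByteLength(dataOnly),
        };
    }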
package/src/interaction/InteractionMessenger.ts CHANGED
@@ -28,6 +28,7 @@ import {
     TlvReadRequest,
     TlvSchema,
     TlvStatusResponse,
+    TlvStream,
     TlvSubscribeRequest,
     TlvSubscribeResponse,
     TlvTimedRequest,
@@ -51,6 +52,7 @@ import {
     chunkAttributePayload,
     DataReportPayloadIterator,
     encodeAttributePayload,
+    encodeAttributePayloadData,
     encodeEventPayload,
     EventReportPayload,
 } from "./AttributeDataEncoder.js";
@@ -80,6 +82,19 @@ export type WriteResponse = TypeFromSchema<typeof TlvWriteResponse>;
 
 const logger = Logger.get("InteractionMessenger");
 
+/**
+ * Maximum number of messages that can be queued for a DataReport because they were not fitting into
+ * the current Report. If we reach this number we send them out forced.
+ */
+const DATA_REPORT_MAX_QUEUED_ATTRIBUTE_MESSAGES = 20;
+
+/**
+ * An empty DataReport with all fields is roughly 23 bytes without data content.
+ * So as soon as available bytes are less than 40 we should send the message. This value is the result
+ * of some manual tests with usual device types
+ */
+const DATA_REPORT_MIN_AVAILABLE_BYTES_BEFORE_SENDING = 40;
+
 class InteractionMessenger {
     constructor(protected exchange: MessageExchange) {}
 
@@ -307,37 +322,143 @@ export class InteractionServerMessenger extends InteractionMessenger {
 
         if (payload !== undefined) {
             // TODO Add tag compressing once https://github.com/project-chip/connectedhomeip/issues/29359 is solved
+            // (or likely remove it)
             dataReport.moreChunkedMessages = true; // Assume we have multiple chunks, also for size calculation
+
+            /** The empty data report to calculate the size of the message. */
             const emptyDataReportBytes = TlvDataReportForSend.encode(dataReport);
 
+            /** Do we have received all data? In that case only the queue is left if filled. */
+            let allDataReceived = false;
+
+            /** Should the queue be sent out first? This defaults to true and is set to false if we try to fill up the message. */
+            let processQueueFirst = true;
+
+            /** Helper method to send out the current dataReport and reset the relevant state for the next chunk. */
             const sendAndResetReport = async () => {
                 await this.sendDataReportMessage(dataReport, waitForAck);
+                // Empty the dataReport data fields for the next chunk and reset the messageSize
                 delete dataReport.attributeReports;
                 delete dataReport.eventReports;
-                messageSize = emptyDataReportBytes.length + 3; // We add 3 bytes because either one of the both removed arrays will be added or it is the last message and we don't care
+                messageSize = emptyDataReportBytes.length;
+                processQueueFirst = true; // After sending a message we first try to process queue
             };
+
+            /** Current size of the message */
             let messageSize = emptyDataReportBytes.length;
 
-            let attributeReportsToSend = new Array<AttributeReportPayload>();
-            let eventReportsToSend = new Array<EventReportPayload>();
+            /** Queue of attribute reports to send */
+            const attributeReportsToSend = new Array<{
+                /** The attribute report to send */
+                attributeReport: AttributeReportPayload;
+                /** The encoded attribute report */
+                encoded: TlvStream;
+                /** The size of the encoded attribute report */
+                encodedSize: number;
+
+                /** If the attribute report needs to be sent in the next message. When set no new data are added. */
+                needSendNext?: boolean;
+            }>();
+
+            /** Queue of event reports to send */
+            const eventReportsToSend = new Array<{
+                /** The event report to send */
+                eventReport: EventReportPayload;
+
+                /** The encoded event report */
+                encoded: TlvStream;
+
+                /** The size of the encoded event report */
+                encodedSize: number;
+            }>();
 
             while (true) {
-                // If we have no data to process we need to get the next chunk
-                // If no more data is available we cancel the while loop and send final message
-                if (attributeReportsToSend.length === 0 && eventReportsToSend.length === 0) {
+                // Decide if entries in the queue are processed first or if we read new data
+                if (
+                    !allDataReceived &&
+                    ((attributeReportsToSend.length === 0 && eventReportsToSend.length === 0) ||
+                        (attributeReportsToSend.length <= DATA_REPORT_MAX_QUEUED_ATTRIBUTE_MESSAGES &&
+                            !processQueueFirst &&
+                            !attributeReportsToSend[0].needSendNext))
+                ) {
                     const { done, value } = payload.next();
                     if (done) {
-                        // No more chunks to send
-                        delete dataReport.moreChunkedMessages;
-                        break;
+                        allDataReceived = true;
+                        if (attributeReportsToSend.length === 0 && eventReportsToSend.length === 0) {
+                            // No more chunks to send and queue is empty, so we are done
+                            delete dataReport.moreChunkedMessages;
+                            break;
+                        } else {
+                            // We got all data, so only queue needs to be sent now, so flag all values to be sent next
+                            // but leave moreChunkedMessages flag set because we do not know if all queue entries match
+                            // into the message
+                            for (const attributeReport of attributeReportsToSend) {
+                                attributeReport.needSendNext = true;
+                            }
+                            continue;
+                        }
                     }
                     if (value === undefined) {
+                        // Should never happen but better handle here
                        continue;
                    }
+
                    if ("attributeData" in value || "attributeStatus" in value) {
-                        attributeReportsToSend = [value];
+                        // If read value is an attributeReport, encode it and add it to the queue
+                        const allowMissingFieldsForNonFabricFilteredRead =
+                            !forFabricFilteredRead && value.hasFabricSensitiveData;
+                        const encoded = encodeAttributePayload(value, {
+                            allowMissingFieldsForNonFabricFilteredRead,
+                        });
+                        const encodedSize = TlvAny.getEncodedByteLength(encoded);
+                        if (attributeReportsToSend.length === 0) {
+                            attributeReportsToSend.push({
+                                attributeReport: value,
+                                encoded,
+                                encodedSize,
+                            });
+                        } else {
+                            // Check if the new attribute belongs to the same endpoint and cluster as the first queued attribute
+                            // Remove once https://github.com/project-chip/connectedhomeip/issues/37384 is fixed and some time passed
+                            const firstQueuedAttributeData = attributeReportsToSend[0].attributeReport.attributeData;
+                            if (
+                                firstQueuedAttributeData !== undefined &&
+                                value.attributeData !== undefined &&
+                                firstQueuedAttributeData.path.nodeId === value.attributeData.path.nodeId &&
+                                firstQueuedAttributeData.path.endpointId === value.attributeData.path.endpointId &&
+                                firstQueuedAttributeData.path.clusterId === value.attributeData.path.clusterId
+                            ) {
+                                // Prioritize this attribute in queue because we know others are too big for current message
+                                attributeReportsToSend.unshift({
+                                    attributeReport: value,
+                                    encoded,
+                                    encodedSize,
+                                });
+                            } else {
+                                // No, we have a cluster change: Queue needs to go out next before we can process this one
+                                // SO flag all queued entries to be sent next and add the new one to the end of the queue
+                                for (const attributeReport of attributeReportsToSend) {
+                                    attributeReport.needSendNext = true;
+                                }
+                                attributeReportsToSend.push({
+                                    attributeReport: value,
+                                    encoded,
+                                    encodedSize,
+                                });
+                            }
+                        }
                    } else if ("eventData" in value || "eventStatus" in value) {
-                        eventReportsToSend = [value];
+                        // If read value is an eventReport, encode it and add it to the queue
+                        const allowMissingFieldsForNonFabricFilteredRead =
+                            !forFabricFilteredRead && value.hasFabricSensitiveData;
+
+                        const encoded = encodeEventPayload(value, { allowMissingFieldsForNonFabricFilteredRead });
+                        const encodedSize = TlvAny.getEncodedByteLength(encoded);
+                        eventReportsToSend.push({
+                            eventReport: value,
+                            encoded,
+                            encodedSize,
+                        });
                    } else {
                        throw new InternalError(`Invalid report type: ${value}`);
                    }
@@ -345,53 +466,149 @@ export class InteractionServerMessenger extends InteractionMessenger {
 
                 // If we have attribute data to send, we add them first
                 if (attributeReportsToSend.length > 0) {
-                    const attributeReport = attributeReportsToSend.shift();
-                    if (attributeReport !== undefined) {
-                        if (dataReport.attributeReports === undefined) {
-                            messageSize += 3; // Array element is added now which needs 3 bytes
-                        }
-                        const allowMissingFieldsForNonFabricFilteredRead =
-                            !forFabricFilteredRead && attributeReport.hasFabricSensitiveData;
-                        const encodedAttribute = encodeAttributePayload(attributeReport, {
-                            allowMissingFieldsForNonFabricFilteredRead,
-                        });
-                        const attributeReportBytes = TlvAny.getEncodedByteLength(encodedAttribute);
-                        if (messageSize + attributeReportBytes > this.exchange.maxPayloadSize) {
-                            if (canAttributePayloadBeChunked(attributeReport)) {
-                                // Attribute is a non-empty array: chunk it and add the chunks to the beginning of the queue
-                                attributeReportsToSend.unshift(...chunkAttributePayload(attributeReport));
+                    const attributeToSend = attributeReportsToSend.shift();
+                    if (attributeToSend === undefined) {
+                        continue; // should never happen, but better check
+                    }
+
+                    const { attributeReport, encoded, encodedSize, needSendNext } = attributeToSend;
+
+                    /** Number of bytes available in the message. */
+                    let availableBytes = this.exchange.maxPayloadSize - messageSize - 3; // 3 bytes for the attributeReports array
+
+                    /** Does the message need to be sent out before we can send this packet? */
+                    let sendOutTheMessage = false;
+                    if (encodedSize > availableBytes) {
+                        // This packet is too big for the current message ...
+                        if ((allDataReceived || needSendNext) && canAttributePayloadBeChunked(attributeReport)) {
+                            // Attribute is a non-empty array: chunk it and try to get as much as possible into the
+                            // initial REPLACE ALL message and add rest to the queue
+                            const chunks = chunkAttributePayload(attributeReport);
+
+                            // Get the Array and the first data chunk of the list and pack them together.
+                            // If this is already too big, it is more optimal to postpone this list completely to the next message
+                            const initialChunk = chunks.shift(); // This is the empty array chunk
+                            const firstDataChunk = chunks.shift(); // First data chunk
+                            if (initialChunk === undefined || firstDataChunk === undefined) {
+                                throw new InternalError(
+                                    "Chunked attribute payload is unexpected. This should not happen!",
+                                );
+                            }
+                            initialChunk.attributeData!.payload.push(firstDataChunk.attributeData!.payload);
+
+                            // Let's encode the initial REPLACE-ALL entry including one array entry
+                            const allowMissingFieldsForNonFabricFilteredRead =
+                                !forFabricFilteredRead && attributeReport.hasFabricSensitiveData;
+                            const encodedInitialChunk = encodeAttributePayload(initialChunk, {
+                                allowMissingFieldsForNonFabricFilteredRead,
+                            });
+                            const encodedInitialChunkSize = TlvAny.getEncodedByteLength(encodedInitialChunk);
+                            if (availableBytes > encodedInitialChunkSize) {
+                                // The initial chunk fits into the message, so lets see how much more we can add
+                                availableBytes -= encodedInitialChunkSize;
+                                messageSize += encodedInitialChunkSize;
+                                while (chunks.length > 0) {
+                                    const nextChunk = chunks.shift();
+                                    if (nextChunk === undefined) {
+                                        throw new InternalError(
+                                            "Chunked attribute payload is undefined. This should not happen!",
+                                        );
+                                    }
+                                    const encodedChunkData = encodeAttributePayloadData(nextChunk, {
+                                        allowMissingFieldsForNonFabricFilteredRead,
+                                    });
+                                    const encodedChunkDataSize = TlvAny.getEncodedByteLength(encodedChunkData);
+                                    if (encodedChunkDataSize > availableBytes) {
+                                        // This chunks does not match anymore, put it and next chunks back to the queue
+                                        chunks.unshift(nextChunk);
+                                        for (let i = chunks.length - 1; i >= 0; i--) {
+                                            const chunk = chunks[i];
+                                            const encodedChunk = encodeAttributePayload(chunk, {
+                                                allowMissingFieldsForNonFabricFilteredRead,
+                                            });
+                                            const encodedChunkSize = TlvAny.getEncodedByteLength(encodedChunk);
+                                            attributeReportsToSend.unshift({
+                                                attributeReport: chunk,
+                                                encoded: encodedChunk,
+                                                encodedSize: encodedChunkSize,
+                                                needSendNext: true,
+                                            });
+                                        }
+                                        if (dataReport.attributeReports === undefined) {
+                                            dataReport.attributeReports = [];
+                                        }
+                                        dataReport.attributeReports.push(
+                                            encodeAttributePayload(initialChunk, {
+                                                allowMissingFieldsForNonFabricFilteredRead,
+                                            }),
+                                        );
+
+                                        break;
+                                    }
+                                    availableBytes -= encodedChunkDataSize;
+                                    messageSize += encodedChunkDataSize;
+                                    initialChunk.attributeData!.payload.push(nextChunk.attributeData!.payload);
+                                }
                                 continue;
+                            } else if (needSendNext) {
+                                // The initial chunk does not fit into the message, but we need to send it next, flag that
+                                sendOutTheMessage = true;
                             }
+                        } else {
+                            // Current attribute is too big for the current message, and we can't/won't chunk it
+                            if (needSendNext) {
+                                // ... but if we need to send it now, flag that we need to send it next
+                                sendOutTheMessage = true;
+                            } else {
+                                // ... otherwise we start filling up the queue
+                                processQueueFirst = false;
+                            }
+                        }
+
+                        let messageWasSent = false;
+                        // If only 40 bytes are left, or we added a chunked array element as prio,
+                        // or the queue has reached its maximum size, then we send the message now because it is full
+                        if (
+                            sendOutTheMessage ||
+                            availableBytes < DATA_REPORT_MIN_AVAILABLE_BYTES_BEFORE_SENDING ||
+                            (attributeReportsToSend.length > 0 && attributeReportsToSend[0].needSendNext) ||
+                            attributeReportsToSend.length >= DATA_REPORT_MAX_QUEUED_ATTRIBUTE_MESSAGES
+                        ) {
                             await sendAndResetReport();
+                            messageWasSent = true;
+                        }
+                        if (!messageWasSent) {
+                            // We did not send the message, means assumption is that there is more space in the message
+                            // So we add the current attribute to the end of the queue
+                            attributeReportsToSend.push(attributeToSend);
+                            continue;
                         }
-                        messageSize += attributeReportBytes;
-                        if (dataReport.attributeReports === undefined) dataReport.attributeReports = [];
-                        dataReport.attributeReports.push(encodedAttribute);
                     }
-                } else if (eventReportsToSend.length > 0) {
-                    const eventReport = eventReportsToSend.shift();
-                    if (eventReport === undefined) {
-                        // No more chunks to send
-                        delete dataReport.moreChunkedMessages;
-                        break;
+                    messageSize += encodedSize;
+                    if (dataReport.attributeReports === undefined) {
+                        dataReport.attributeReports = [];
                     }
-                    if (dataReport.eventReports === undefined) {
-                        messageSize += 3; // Array element is added now which needs 3 bytes
+                    dataReport.attributeReports.push(encoded);
+                } else if (eventReportsToSend.length > 0) {
+                    const eventToSend = eventReportsToSend.shift();
+                    if (eventToSend === undefined) {
+                        continue;
                     }
-                    const allowMissingFieldsForNonFabricFilteredRead =
-                        !forFabricFilteredRead && eventReport.hasFabricSensitiveData;
-                    const encodedEvent = encodeEventPayload(eventReport, {
-                        allowMissingFieldsForNonFabricFilteredRead,
-                    });
-                    const eventReportBytes = TlvAny.getEncodedByteLength(encodedEvent);
-                    if (messageSize + eventReportBytes > this.exchange.maxPayloadSize) {
+
+                    const { encoded, encodedSize } = eventToSend;
+                    if (
+                        messageSize + 3 + (dataReport.attributeReports ? 3 : 0) + encodedSize >
+                        this.exchange.maxPayloadSize
+                    ) {
                         await sendAndResetReport();
                     }
-                    messageSize += eventReportBytes;
-                    if (dataReport.eventReports === undefined) dataReport.eventReports = [];
-                    dataReport.eventReports.push(encodedEvent);
-                } else {
-                    // No more chunks to send
+                    messageSize += encodedSize;
+                    if (dataReport.eventReports === undefined) {
+                        dataReport.eventReports = [];
+                    }
+                    dataReport.eventReports.push(encoded);
+                } else if (allDataReceived) {
+                    // We have received all data and queue is empty, so we are done
                     delete dataReport.moreChunkedMessages;
                     break;
                 }
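Taken together, the queues and the two new constants turn the report sender into a size-budgeted packer: every attribute or event report is encoded once when it is read, its byte length is kept alongside it, and the current DataReport is flushed as soon as an entry is flagged to go out next, fewer than 40 bytes remain, or more than 20 attribute entries have accumulated. A condensed sketch of that send decision with hypothetical inputs (the real code above additionally chunks oversized list attributes):

    const DATA_REPORT_MAX_QUEUED_ATTRIBUTE_MESSAGES = 20;
    const DATA_REPORT_MIN_AVAILABLE_BYTES_BEFORE_SENDING = 40;

    function shouldSendNow(options: {
        maxPayloadSize: number; // exchange payload limit, e.g. roughly 1 KB (illustrative value)
        messageSize: number; // bytes already occupied by the DataReport
        queuedAttributes: number; // entries waiting in attributeReportsToSend
        mustSendNext: boolean; // sendOutTheMessage / needSendNext in the code above
    }): boolean {
        const availableBytes = options.maxPayloadSize - options.messageSize - 3; // 3 bytes for the array wrapper
        return (
            options.mustSendNext ||
            availableBytes < DATA_REPORT_MIN_AVAILABLE_BYTES_BEFORE_SENDING ||
            options.queuedAttributes >= DATA_REPORT_MAX_QUEUED_ATTRIBUTE_MESSAGES
        );
    }

    // 1024 - 990 - 3 = 31 bytes left, which is below the 40-byte threshold, so flush first.
    shouldSendNow({ maxPayloadSize: 1024, messageSize: 990, queuedAttributes: 2, mustSendNext: false }); // true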
package/src/peer/ControllerCommissioningFlow.ts CHANGED
@@ -811,11 +811,8 @@ export class ControllerCommissioningFlow {
                }),
            );
        } catch (error) {
-            CommissioningError.accept(error);
-            return {
-                code: CommissioningStepResultCode.Failure,
-                breadcrumb: this.#lastBreadcrumb,
-            };
+            // convert error
+            throw repackErrorAs(error, RecoverableCommissioningError);
        }
 
        return {
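On the controller side, a failing commissioning step no longer returns a CommissioningStepResultCode.Failure result; it rethrows the original error repacked as a RecoverableCommissioningError, leaving the retry or abort decision to the caller. A simplified sketch of that repack pattern (this is not the @matter/general implementation of repackErrorAs, only an illustration of its shape):

    class RecoverableCommissioningError extends Error {}

    function repackErrorAs<T extends Error>(error: unknown, ErrorType: new (message?: string) => T): T {
        const repacked = new ErrorType(error instanceof Error ? error.message : String(error));
        if (error instanceof Error && error.stack !== undefined) {
            repacked.stack = error.stack; // keep the original stack for debugging
        }
        return repacked;
    }

    async function runStep(step: () => Promise<void>) {
        try {
            await step();
        } catch (error) {
            // Convert any step failure, as the changed catch block above now does
            throw repackErrorAs(error, RecoverableCommissioningError);
        }
    }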