@fluidframework/container-runtime 2.0.0-rc.3.0.3 → 2.0.0-rc.3.0.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. package/dist/channelCollection.js +1 -1
  2. package/dist/channelCollection.js.map +1 -1
  3. package/dist/containerRuntime.d.ts.map +1 -1
  4. package/dist/containerRuntime.js +30 -43
  5. package/dist/containerRuntime.js.map +1 -1
  6. package/dist/metadata.d.ts +2 -2
  7. package/dist/metadata.d.ts.map +1 -1
  8. package/dist/metadata.js.map +1 -1
  9. package/dist/opLifecycle/batchManager.d.ts +4 -1
  10. package/dist/opLifecycle/batchManager.d.ts.map +1 -1
  11. package/dist/opLifecycle/batchManager.js +0 -10
  12. package/dist/opLifecycle/batchManager.js.map +1 -1
  13. package/dist/opLifecycle/outbox.d.ts +0 -4
  14. package/dist/opLifecycle/outbox.d.ts.map +1 -1
  15. package/dist/opLifecycle/outbox.js +7 -38
  16. package/dist/opLifecycle/outbox.js.map +1 -1
  17. package/dist/packageVersion.d.ts +1 -1
  18. package/dist/packageVersion.js +1 -1
  19. package/dist/packageVersion.js.map +1 -1
  20. package/lib/channelCollection.js +1 -1
  21. package/lib/channelCollection.js.map +1 -1
  22. package/lib/containerRuntime.d.ts.map +1 -1
  23. package/lib/containerRuntime.js +30 -43
  24. package/lib/containerRuntime.js.map +1 -1
  25. package/lib/metadata.d.ts +2 -2
  26. package/lib/metadata.d.ts.map +1 -1
  27. package/lib/metadata.js.map +1 -1
  28. package/lib/opLifecycle/batchManager.d.ts +4 -1
  29. package/lib/opLifecycle/batchManager.d.ts.map +1 -1
  30. package/lib/opLifecycle/batchManager.js +0 -10
  31. package/lib/opLifecycle/batchManager.js.map +1 -1
  32. package/lib/opLifecycle/outbox.d.ts +0 -4
  33. package/lib/opLifecycle/outbox.d.ts.map +1 -1
  34. package/lib/opLifecycle/outbox.js +7 -38
  35. package/lib/opLifecycle/outbox.js.map +1 -1
  36. package/lib/packageVersion.d.ts +1 -1
  37. package/lib/packageVersion.js +1 -1
  38. package/lib/packageVersion.js.map +1 -1
  39. package/package.json +17 -17
  40. package/src/channelCollection.ts +1 -1
  41. package/src/containerRuntime.ts +42 -56
  42. package/src/metadata.ts +2 -2
  43. package/src/opLifecycle/README.md +4 -4
  44. package/src/opLifecycle/batchManager.ts +5 -14
  45. package/src/opLifecycle/outbox.ts +7 -53
  46. package/src/packageVersion.ts +1 -1
@@ -150,7 +150,7 @@ import {
150
150
  type OutboundContainerRuntimeMessage,
151
151
  type UnknownContainerRuntimeMessage,
152
152
  } from "./messageTypes.js";
153
- import { IBatchMetadata, IIdAllocationMetadata } from "./metadata.js";
153
+ import { IBatchMetadata, ISavedOpMetadata } from "./metadata.js";
154
154
  import {
155
155
  BatchMessage,
156
156
  IBatch,
@@ -671,11 +671,13 @@ type MessageWithContext =
671
671
  message: InboundSequencedContainerRuntimeMessage;
672
672
  modernRuntimeMessage: true;
673
673
  local: boolean;
674
+ savedOp?: boolean;
674
675
  }
675
676
  | {
676
677
  message: InboundSequencedContainerRuntimeMessageOrSystemMessage;
677
678
  modernRuntimeMessage: false;
678
679
  local: boolean;
680
+ savedOp?: boolean;
679
681
  };
680
682
 
681
683
  const summarizerRequestUrl = "_summarizer";
@@ -1128,7 +1130,7 @@ export class ContainerRuntime
1128
1130
  // Id Compressor serializes final state (see getPendingLocalState()). As a result, it needs to skip all ops that preceded that state
1129
1131
  // (such ops will be marked by Loader layer as savedOp === true)
1130
1132
  // That said, in "delayed" mode it's possible that Id Compressor was never initialized before getPendingLocalState() is called.
1131
- // In such case we have to process all ops, including those marked with saveOp === true.
1133
+ // In such case we have to process all ops, including those marked with savedOp === true.
1132
1134
  private readonly skipSavedCompressorOps: boolean;
1133
1135
 
1134
1136
  public get idCompressorMode() {
@@ -2318,6 +2320,7 @@ export class ContainerRuntime
2318
2320
  let newState: boolean;
2319
2321
 
2320
2322
  try {
2323
+ this.submitIdAllocationOpIfNeeded(true);
2321
2324
  // replay the ops
2322
2325
  this.pendingStateManager.replayPendingStates();
2323
2326
  } finally {
@@ -2540,21 +2543,28 @@ export class ContainerRuntime
2540
2543
  // We do not need to make a deep copy. Each layer will just replace message.contents itself,
2541
2544
  // but will not modify the contents object (likely it will replace it on the message).
2542
2545
  const messageCopy = { ...messageArg };
2546
+ const savedOp = (messageCopy.metadata as ISavedOpMetadata)?.savedOp;
2543
2547
  for (const message of this.remoteMessageProcessor.process(messageCopy)) {
2544
- if (modernRuntimeMessage) {
2545
- this.processCore({
2546
- // Cast it since we expect it to be this based on modernRuntimeMessage computation above.
2547
- // There is nothing really ensuring that anytime original message.type is Operation that
2548
- // the result messages will be so. In the end modern bool being true only directs to
2549
- // throw error if ultimately unrecognized without compat details saying otherwise.
2550
- message: message as InboundSequencedContainerRuntimeMessage,
2551
- local,
2552
- modernRuntimeMessage,
2553
- });
2554
- } else {
2555
- // Unrecognized message will be ignored.
2556
- this.processCore({ message, local, modernRuntimeMessage });
2557
- }
2548
+ const msg: MessageWithContext = modernRuntimeMessage
2549
+ ? {
2550
+ // Cast it since we expect it to be this based on modernRuntimeMessage computation above.
2551
+ // There is nothing really ensuring that anytime original message.type is Operation that
2552
+ // the result messages will be so. In the end modern bool being true only directs to
2553
+ // throw error if ultimately unrecognized without compat details saying otherwise.
2554
+ message: message as InboundSequencedContainerRuntimeMessage,
2555
+ local,
2556
+ modernRuntimeMessage,
2557
+ }
2558
+ : // Unrecognized message will be ignored.
2559
+ {
2560
+ message,
2561
+ local,
2562
+ modernRuntimeMessage,
2563
+ };
2564
+ msg.savedOp = savedOp;
2565
+
2566
+ // ensure that we observe any re-entrancy, and if needed, rebase ops
2567
+ this.ensureNoDataModelChanges(() => this.processCore(msg));
2558
2568
  }
2559
2569
  }
2560
2570
 
@@ -2642,13 +2652,7 @@ export class ContainerRuntime
2642
2652
  // stashed ops flow. The compressor is stashed with these ops already processed.
2643
2653
  // That said, in idCompressorMode === "delayed", we might not serialize ID compressor, and
2644
2654
  // thus we need to process all the ops.
2645
- if (
2646
- !(
2647
- this.skipSavedCompressorOps &&
2648
- (messageWithContext.message.metadata as IIdAllocationMetadata)?.savedOp ===
2649
- true
2650
- )
2651
- ) {
2655
+ if (!(this.skipSavedCompressorOps && messageWithContext.savedOp === true)) {
2652
2656
  const range = messageWithContext.message.contents;
2653
2657
  // Some other client turned on the id compressor. If we have not turned it on,
2654
2658
  // put it in a pending queue and delay finalization.
@@ -2809,9 +2813,9 @@ export class ContainerRuntime
2809
2813
  let checkpoint: IBatchCheckpoint | undefined;
2810
2814
  let result: T;
2811
2815
  if (this.mc.config.getBoolean("Fluid.ContainerRuntime.EnableRollback")) {
2812
- // Note: we are not touching this.pendingAttachBatch here, for two reasons:
2813
- // 1. It would not help, as we flush attach ops as they become available.
2814
- // 2. There is no way to undo process of data store creation.
2816
+ // Note: we are not touching any batches other than mainBatch here, for two reasons:
2817
+ // 1. It would not help, as other batches are flushed independently from main batch.
2818
+ // 2. There is no way to undo process of data store creation, blob creation, ID compressor ops, or other things tracked by other batches.
2815
2819
  checkpoint = this.outbox.checkpoint().mainBatch;
2816
2820
  }
2817
2821
  try {
@@ -3851,9 +3855,11 @@ export class ContainerRuntime
3851
3855
  return this.blobManager.createBlob(blob, signal);
3852
3856
  }
3853
3857
 
3854
- private submitIdAllocationOpIfNeeded(): void {
3858
+ private submitIdAllocationOpIfNeeded(resubmitOutstandingRanges = false): void {
3855
3859
  if (this._idCompressor) {
3856
- const idRange = this._idCompressor.takeNextCreationRange();
3860
+ const idRange = resubmitOutstandingRanges
3861
+ ? this.idCompressor?.takeUnfinalizedCreationRange()
3862
+ : this._idCompressor.takeNextCreationRange();
3857
3863
  // Don't include the idRange if there weren't any Ids allocated
3858
3864
  if (idRange?.ids !== undefined) {
3859
3865
  const idAllocationMessage: ContainerRuntimeIdAllocationMessage = {
@@ -3933,33 +3939,7 @@ export class ContainerRuntime
3933
3939
  });
3934
3940
  }
3935
3941
 
3936
- // If this is attach message for new data store, and we are in a batch, send this op out of order
3937
- // Is it safe:
3938
- // Yes, this should be safe reordering. Newly created data stores are not visible through API surface.
3939
- // They become visible only when aliased, or handle to some sub-element of newly created datastore
3940
- // is stored in some DDS, i.e. only after some other op.
3941
- // Why:
3942
- // Attach ops are large, and expensive to process. Plus there are scenarios where a lot of new data
3943
- // stores are created, causing issues like relay service throttling (too many ops) and catastrophic
3944
- // failure (batch is too large). Pushing them earlier and outside of main batch should alleviate
3945
- // these issues.
3946
- // Cons:
3947
- // 1. With large batches, relay service may throttle clients. Clients may disconnect while throttled.
3948
- // This change creates new possibility of a lot of newly created data stores never being referenced
3949
- because client died before it had a chance to submit the rest of the ops. This will create more
3950
- // garbage that needs to be collected leveraging GC (Garbage Collection) feature.
3951
- // 2. Sending ops out of order means they are excluded from rollback functionality. This is not an issue
3952
- // today as rollback can't undo creation of data store. To some extent not sending them is a bigger
3953
- // issue than sending.
3954
- // Please note that this does not change file format, so it can be disabled in the future if this
3955
- // optimization no longer makes sense (for example, batch compression may make it less appealing).
3956
- if (
3957
- this.currentlyBatching() &&
3958
- type === ContainerMessageType.Attach &&
3959
- this.disableAttachReorder !== true
3960
- ) {
3961
- this.outbox.submitAttach(message);
3962
- } else if (type === ContainerMessageType.BlobAttach) {
3942
+ if (type === ContainerMessageType.BlobAttach) {
3963
3943
  // BlobAttach ops must have their metadata visible and cannot be grouped (see opGroupingManager.ts)
3964
3944
  this.outbox.submitBlobAttach(message);
3965
3945
  } else {
@@ -4119,7 +4099,13 @@ export class ContainerRuntime
4119
4099
  this.channelCollection.reSubmit(message.type, message.contents, localOpMetadata);
4120
4100
  break;
4121
4101
  case ContainerMessageType.IdAllocation: {
4122
- this.submit(message, localOpMetadata);
4102
+ // Allocation ops are never resubmitted/rebased. This is because they require special handling to
4103
+ // avoid being submitted out of order. For example, if the pending state manager contained
4104
+ // [idOp1, dataOp1, idOp2, dataOp2] and the resubmission of dataOp1 generated idOp3, that would be
4105
+ // placed into the outbox in the same batch as idOp1, but before idOp2 is resubmitted.
4106
+ // To avoid this, allocation ops are simply never resubmitted. Prior to invoking the pending state
4107
+ // manager to replay pending ops, the runtime will always submit a new allocation range that includes
4108
+ // all pending IDs. The resubmitted allocation ops are then ignored here.
4123
4109
  break;
4124
4110
  }
4125
4111
  case ContainerMessageType.ChunkedOp:
package/src/metadata.ts CHANGED
@@ -19,8 +19,8 @@ export interface IBlobMetadata {
19
19
  }
20
20
 
21
21
  /**
22
- * The IdCompressor needs to know if this is a replayed savedOp as those need to be skipped in stashed ops scenarios.
22
+ * ContainerRuntime needs to know if this is a replayed savedOp as those need to be skipped in stashed ops scenarios.
23
23
  */
24
- export interface IIdAllocationMetadata {
24
+ export interface ISavedOpMetadata {
25
25
  savedOp?: boolean;
26
26
  }
@@ -339,19 +339,19 @@ stateDiagram-v2
339
339
  state "Store original (uncompressed, unchunked, ungrouped) batch locally" as store
340
340
  state if_compression <<choice>>
341
341
  [*] --> ContainerRuntime.submit
342
- ContainerRuntime.submit --> outbox.submitAttach
342
+ ContainerRuntime.submit --> outbox.submitIdAllocation
343
343
  ContainerRuntime.submit --> outbox.submitBlobAttach
344
344
  ContainerRuntime.submit --> outbox.submit
345
345
  outbox.submit --> scheduleFlush
346
- outbox.submitAttach --> scheduleFlush
346
+ outbox.submitIdAllocation --> scheduleFlush
347
347
  outbox.submitBlobAttach --> scheduleFlush
348
348
  scheduleFlush --> jsTurn
349
349
  jsTurn --> flush
350
350
  flush --> outbox.flushInternalMain
351
- flush --> outbox.flushInternalAttach
351
+ flush --> outbox.flushInternalIdAllocation
352
352
  flush --> outbox.flushInternalBlobAttach
353
353
  outbox.flushInternalMain --> flushInternal
354
- outbox.flushInternalAttach --> flushInternal
354
+ outbox.flushInternalIdAllocation --> flushInternal
355
355
  outbox.flushInternalBlobAttach --> flushInternal
356
356
  flushInternal --> ContainerRuntime.reSubmit: if batch has reentrant ops and should group
357
357
  ContainerRuntime.reSubmit --> flushInternal
@@ -9,8 +9,12 @@ import { BatchMessage, IBatch, IBatchCheckpoint } from "./definitions.js";
9
9
 
10
10
  export interface IBatchManagerOptions {
11
11
  readonly hardLimit: number;
12
- readonly softLimit?: number;
13
12
  readonly compressionOptions?: ICompressionRuntimeOptions;
13
+
14
+ /**
15
+ * If true, the outbox is allowed to rebase the batch during flushing.
16
+ */
17
+ readonly canRebase: boolean;
14
18
  }
15
19
 
16
20
  export interface BatchSequenceNumbers {
@@ -72,19 +76,6 @@ export class BatchManager {
72
76
  // initially stored as base64, and that requires only 2 extra escape characters.
73
77
  const socketMessageSize = contentSize + opOverhead * opCount;
74
78
 
75
- // If we were provided soft limit, check for exceeding it.
76
- // But only if we have any ops, as the intention here is to flush existing ops (on exceeding this limit)
77
- // and start over. That's not an option if we have no ops.
78
- // If compression is enabled, the soft and hard limit are ignored and the message will be pushed anyways.
79
- // Cases where the message is still too large will be handled by the maxConsecutiveReconnects path.
80
- if (
81
- this.options.softLimit !== undefined &&
82
- this.length > 0 &&
83
- socketMessageSize >= this.options.softLimit
84
- ) {
85
- return false;
86
- }
87
-
88
79
  if (socketMessageSize >= this.options.hardLimit) {
89
80
  return false;
90
81
  }
@@ -89,11 +89,9 @@ export function getLongStack<T>(action: () => T, length: number = 50): T {
89
89
 
90
90
  export class Outbox {
91
91
  private readonly mc: MonitoringContext;
92
- private readonly attachFlowBatch: BatchManager;
93
92
  private readonly mainBatch: BatchManager;
94
93
  private readonly blobAttachBatch: BatchManager;
95
94
  private readonly idAllocationBatch: BatchManager;
96
- private readonly defaultAttachFlowSoftLimitInBytes = 320 * 1024;
97
95
  private batchRebasesToReport = 5;
98
96
  private rebasing = false;
99
97
 
@@ -113,21 +111,14 @@ export class Outbox {
113
111
  Number.POSITIVE_INFINITY;
114
112
  // We need to allow infinite size batches if we enable compression
115
113
  const hardLimit = isCompressionEnabled ? Infinity : this.params.config.maxBatchSizeInBytes;
116
- const softLimit = isCompressionEnabled ? Infinity : this.defaultAttachFlowSoftLimitInBytes;
117
114
 
118
- this.attachFlowBatch = new BatchManager({ hardLimit, softLimit });
119
- this.mainBatch = new BatchManager({ hardLimit });
120
- this.blobAttachBatch = new BatchManager({ hardLimit });
121
- this.idAllocationBatch = new BatchManager({ hardLimit });
115
+ this.mainBatch = new BatchManager({ hardLimit, canRebase: true });
116
+ this.blobAttachBatch = new BatchManager({ hardLimit, canRebase: true });
117
+ this.idAllocationBatch = new BatchManager({ hardLimit, canRebase: false });
122
118
  }
123
119
 
124
120
  public get messageCount(): number {
125
- return (
126
- this.attachFlowBatch.length +
127
- this.mainBatch.length +
128
- this.blobAttachBatch.length +
129
- this.idAllocationBatch.length
130
- );
121
+ return this.mainBatch.length + this.blobAttachBatch.length + this.idAllocationBatch.length;
131
122
  }
132
123
 
133
124
  public get isEmpty(): boolean {
@@ -142,13 +133,11 @@ export class Outbox {
142
133
  */
143
134
  private maybeFlushPartialBatch() {
144
135
  const mainBatchSeqNums = this.mainBatch.sequenceNumbers;
145
- const attachFlowBatchSeqNums = this.attachFlowBatch.sequenceNumbers;
146
136
  const blobAttachSeqNums = this.blobAttachBatch.sequenceNumbers;
147
137
  const idAllocSeqNums = this.idAllocationBatch.sequenceNumbers;
148
138
  assert(
149
139
  this.params.config.disablePartialFlush ||
150
- (sequenceNumbersMatch(mainBatchSeqNums, attachFlowBatchSeqNums) &&
151
- sequenceNumbersMatch(mainBatchSeqNums, blobAttachSeqNums) &&
140
+ (sequenceNumbersMatch(mainBatchSeqNums, blobAttachSeqNums) &&
152
141
  sequenceNumbersMatch(mainBatchSeqNums, idAllocSeqNums)),
153
142
  0x58d /* Reference sequence numbers from both batches must be in sync */,
154
143
  );
@@ -157,7 +146,6 @@ export class Outbox {
157
146
 
158
147
  if (
159
148
  sequenceNumbersMatch(mainBatchSeqNums, currentSequenceNumbers) &&
160
- sequenceNumbersMatch(attachFlowBatchSeqNums, currentSequenceNumbers) &&
161
149
  sequenceNumbersMatch(blobAttachSeqNums, currentSequenceNumbers) &&
162
150
  sequenceNumbersMatch(idAllocSeqNums, currentSequenceNumbers)
163
151
  ) {
@@ -172,8 +160,6 @@ export class Outbox {
172
160
  eventName: "ReferenceSequenceNumberMismatch",
173
161
  mainReferenceSequenceNumber: mainBatchSeqNums.referenceSequenceNumber,
174
162
  mainClientSequenceNumber: mainBatchSeqNums.clientSequenceNumber,
175
- attachReferenceSequenceNumber: attachFlowBatchSeqNums.referenceSequenceNumber,
176
- attachClientSequenceNumber: attachFlowBatchSeqNums.clientSequenceNumber,
177
163
  blobAttachReferenceSequenceNumber: blobAttachSeqNums.referenceSequenceNumber,
178
164
  blobAttachClientSequenceNumber: blobAttachSeqNums.clientSequenceNumber,
179
165
  currentReferenceSequenceNumber: currentSequenceNumbers.referenceSequenceNumber,
@@ -194,37 +180,6 @@ export class Outbox {
194
180
  this.addMessageToBatchManager(this.mainBatch, message);
195
181
  }
196
182
 
197
- public submitAttach(message: BatchMessage) {
198
- this.maybeFlushPartialBatch();
199
-
200
- if (
201
- !this.attachFlowBatch.push(
202
- message,
203
- this.isContextReentrant(),
204
- this.params.getCurrentSequenceNumbers().clientSequenceNumber,
205
- )
206
- ) {
207
- // BatchManager has two limits - soft limit & hard limit. Soft limit is only engaged
208
- // when queue is not empty.
209
- // Flush queue & retry. Failure on retry would mean - single message is bigger than hard limit
210
- this.flushInternal(this.attachFlowBatch);
211
-
212
- this.addMessageToBatchManager(this.attachFlowBatch, message);
213
- }
214
-
215
- // If compression is enabled, we will always successfully receive
216
- // attach ops and compress then send them at the next JS turn, regardless
217
- // of the overall size of the accumulated ops in the batch.
218
- // However, it is more efficient to flush these ops faster, preferably
219
- // after they reach a size which would benefit from compression.
220
- if (
221
- this.attachFlowBatch.contentSizeInBytes >=
222
- this.params.config.compressionOptions.minimumBatchSizeInBytes
223
- ) {
224
- this.flushInternal(this.attachFlowBatch);
225
- }
226
- }
227
-
228
183
  public submitBlobAttach(message: BatchMessage) {
229
184
  this.maybeFlushPartialBatch();
230
185
 
@@ -303,7 +258,6 @@ export class Outbox {
303
258
 
304
259
  private flushAll() {
305
260
  this.flushInternal(this.idAllocationBatch);
306
- this.flushInternal(this.attachFlowBatch);
307
261
  this.flushInternal(this.blobAttachBatch, true /* disableGroupedBatching */);
308
262
  this.flushInternal(this.mainBatch);
309
263
  }
@@ -316,7 +270,7 @@ export class Outbox {
316
270
  const rawBatch = batchManager.popBatch();
317
271
  const shouldGroup =
318
272
  !disableGroupedBatching && this.params.groupingManager.shouldGroup(rawBatch);
319
- if (rawBatch.hasReentrantOps === true && shouldGroup) {
273
+ if (batchManager.options.canRebase && rawBatch.hasReentrantOps === true && shouldGroup) {
320
274
  assert(!this.rebasing, 0x6fa /* A rebased batch should never have reentrant ops */);
321
275
  // If a batch contains reentrant ops (ops created as a result from processing another op)
322
276
  // it needs to be rebased so that we can ensure consistent reference sequence numbers
@@ -346,6 +300,7 @@ export class Outbox {
346
300
  */
347
301
  private rebase(rawBatch: IBatch, batchManager: BatchManager) {
348
302
  assert(!this.rebasing, 0x6fb /* Reentrancy */);
303
+ assert(batchManager.options.canRebase, "BatchManager does not support rebase");
349
304
 
350
305
  this.rebasing = true;
351
306
  for (const message of rawBatch.content) {
@@ -479,7 +434,6 @@ export class Outbox {
479
434
  const mainBatch: IBatchCheckpoint = this.mainBatch.checkpoint();
480
435
  return {
481
436
  mainBatch,
482
- attachFlowBatch: this.attachFlowBatch.checkpoint(),
483
437
  blobAttachBatch: this.blobAttachBatch.checkpoint(),
484
438
  };
485
439
  }
@@ -6,4 +6,4 @@
6
6
  */
7
7
 
8
8
  export const pkgName = "@fluidframework/container-runtime";
9
- export const pkgVersion = "2.0.0-rc.3.0.3";
9
+ export const pkgVersion = "2.0.0-rc.3.0.4";