@firebase/firestore 4.7.16 → 4.7.17

This diff shows the changes between publicly available package versions as published to a supported public registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in that registry.
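
The most notable functional change visible in this diff, beyond the version bump to 4.7.17 (SDK version string 11.9.0) and the minifier-driven identifier renames, is that __PRIVATE_WebChannelConnection now keeps a collection of its open WebChannel instances and closes any that remain when terminate() is called. Below is a rough, de-minified sketch of that logic; the readable names (openWebChannels, addOpenWebChannel, removeOpenWebChannel) are assumptions, and the exact shipped code appears only in minified form in the hunks that follow.

// Simplified stand-in for the WebChannel type exported by
// '@firebase/webchannel-wrapper'; only close() matters for this sketch.
interface WebChannel {
  close(): void;
}

class WebChannelConnectionSketch {
  // A collection of open WebChannel instances (minified as this.l_).
  openWebChannels: WebChannel[] = [];

  // Called right after createWebChannel() so the new instance is tracked.
  addOpenWebChannel(webChannel: WebChannel): void {
    this.openWebChannels.push(webChannel);
  }

  // Called from the WebChannel CLOSE handler to stop tracking the instance.
  // This implements the intent stated in the published comments; the minified
  // dist filters with `t === e`, so consult the hunk below for the exact
  // shipped predicate.
  removeOpenWebChannel(webChannel: WebChannel): void {
    this.openWebChannels = this.openWebChannels.filter(c => c !== webChannel);
  }

  // Closes and cleans up any resources associated with the connection: when
  // the Firestore instance is terminated, any remaining open WebChannel
  // instances are explicitly closed.
  terminate(): void {
    this.openWebChannels.forEach(c => c.close());
    this.openWebChannels = [];
  }
}
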
package/dist/index.cjs.js CHANGED
@@ -9,7 +9,7 @@ var util = require('@firebase/util');
9
9
  var bloomBlob = require('@firebase/webchannel-wrapper/bloom-blob');
10
10
  var webchannelBlob = require('@firebase/webchannel-wrapper/webchannel-blob');
11
11
 
12
- const F = "@firebase/firestore", M = "4.7.16";
12
+ const F = "@firebase/firestore", M = "4.7.17";
13
13
 
14
14
  /**
15
15
  * @license
@@ -71,7 +71,7 @@ User.MOCK_USER = new User("mock-user");
71
71
  * See the License for the specific language governing permissions and
72
72
  * limitations under the License.
73
73
  */
74
- let x = "11.8.1";
74
+ let x = "11.9.0";
75
75
 
76
76
  /**
77
77
  * @license
@@ -2438,7 +2438,7 @@ function __PRIVATE_newDbDocumentMutationPrefixForPath(e, t) {
2438
2438
  * there is no useful information to store as the value. The raw (unencoded)
2439
2439
  * path cannot be stored because IndexedDb doesn't store prototype
2440
2440
  * information.
2441
- */ const re = {}, ie = "documentMutations", se = "remoteDocumentsV14", oe = [ "prefixPath", "collectionGroup", "readTime", "documentId" ], _e = "documentKeyIndex", ae = [ "prefixPath", "collectionGroup", "documentId" ], ue = "collectionGroupIndex", ce = [ "collectionGroup", "readTime", "prefixPath", "documentId" ], le = "remoteDocumentGlobal", he = "remoteDocumentGlobalKey", Pe = "targets", Te = "queryTargetsIndex", Ie = [ "canonicalId", "targetId" ], Ee = "targetDocuments", de = [ "targetId", "path" ], Ae = "documentTargetsIndex", Re = [ "path", "targetId" ], Ve = "targetGlobalKey", me = "targetGlobal", fe = "collectionParents", ge = [ "collectionId", "parent" ], pe = "clientMetadata", ye = "clientId", we = "bundles", Se = "bundleId", be = "namedQueries", De = "name", ve = "indexConfiguration", Ce = "indexId", Fe = "collectionGroupIndex", Me = "collectionGroup", xe = "indexState", Oe = [ "indexId", "uid" ], Ne = "sequenceNumberIndex", Be = [ "uid", "sequenceNumber" ], Le = "indexEntries", ke = [ "indexId", "uid", "arrayValue", "directionalValue", "orderedDocumentKey", "documentKey" ], qe = "documentKeyIndex", Qe = [ "indexId", "uid", "orderedDocumentKey" ], $e = "documentOverlays", Ue = [ "userId", "collectionPath", "documentId" ], Ke = "collectionPathOverlayIndex", We = [ "userId", "collectionPath", "largestBatchId" ], Ge = "collectionGroupOverlayIndex", ze = [ "userId", "collectionGroup", "largestBatchId" ], je = "globals", He = "name", Je = [ ...[ ...[ ...[ ...[ Y, X, ie, j, Pe, H, me, Ee ], pe ], le ], fe ], we, be ], Ye = [ ...Je, $e ], Ze = [ Y, X, ie, se, Pe, H, me, Ee, pe, le, fe, we, be, $e ], Xe = Ze, et = [ ...Xe, ve, xe, Le ], tt = et, nt = [ ...et, je ], rt = nt;
2441
+ */ const re = {}, ie = "documentMutations", se = "remoteDocumentsV14", oe = [ "prefixPath", "collectionGroup", "readTime", "documentId" ], _e = "documentKeyIndex", ae = [ "prefixPath", "collectionGroup", "documentId" ], ue = "collectionGroupIndex", ce = [ "collectionGroup", "readTime", "prefixPath", "documentId" ], le = "remoteDocumentGlobal", he = "remoteDocumentGlobalKey", Pe = "targets", Te = "queryTargetsIndex", Ie = [ "canonicalId", "targetId" ], Ee = "targetDocuments", de = [ "targetId", "path" ], Ae = "documentTargetsIndex", Re = [ "path", "targetId" ], Ve = "targetGlobalKey", me = "targetGlobal", fe = "collectionParents", ge = [ "collectionId", "parent" ], pe = "clientMetadata", ye = "clientId", we = "bundles", be = "bundleId", Se = "namedQueries", De = "name", ve = "indexConfiguration", Ce = "indexId", Fe = "collectionGroupIndex", Me = "collectionGroup", xe = "indexState", Oe = [ "indexId", "uid" ], Ne = "sequenceNumberIndex", Be = [ "uid", "sequenceNumber" ], Le = "indexEntries", ke = [ "indexId", "uid", "arrayValue", "directionalValue", "orderedDocumentKey", "documentKey" ], qe = "documentKeyIndex", Qe = [ "indexId", "uid", "orderedDocumentKey" ], $e = "documentOverlays", Ue = [ "userId", "collectionPath", "documentId" ], Ke = "collectionPathOverlayIndex", We = [ "userId", "collectionPath", "largestBatchId" ], Ge = "collectionGroupOverlayIndex", ze = [ "userId", "collectionGroup", "largestBatchId" ], je = "globals", He = "name", Je = [ ...[ ...[ ...[ ...[ Y, X, ie, j, Pe, H, me, Ee ], pe ], le ], fe ], we, Se ], Ye = [ ...Je, $e ], Ze = [ Y, X, ie, se, Pe, H, me, Ee, pe, le, fe, we, Se, $e ], Xe = Ze, et = [ ...Xe, ve, xe, Le ], tt = et, nt = [ ...et, je ], rt = nt;
2442
2442
 
2443
2443
  /**
2444
2444
  * @license
@@ -6051,7 +6051,7 @@ class BloomFilter {
6051
6051
  r.modulo(this.ye).toNumber();
6052
6052
  }
6053
6053
  // Return whether the bit on the given index in the bitmap is set to 1.
6054
- Se(e) {
6054
+ be(e) {
6055
6055
  return !!(this.bitmap[Math.floor(e / 8)] & 1 << e % 8);
6056
6056
  }
6057
6057
  mightContain(e) {
@@ -6060,7 +6060,7 @@ class BloomFilter {
6060
6060
  const t = __PRIVATE_getMd5HashValue(e), [n, r] = __PRIVATE_get64BitUints(t);
6061
6061
  for (let e = 0; e < this.hashCount; e++) {
6062
6062
  const t = this.we(n, r, e);
6063
- if (!this.Se(t)) return !1;
6063
+ if (!this.be(t)) return !1;
6064
6064
  }
6065
6065
  return !0;
6066
6066
  }
@@ -6073,10 +6073,10 @@ class BloomFilter {
6073
6073
  const t = __PRIVATE_getMd5HashValue(e), [n, r] = __PRIVATE_get64BitUints(t);
6074
6074
  for (let e = 0; e < this.hashCount; e++) {
6075
6075
  const t = this.we(n, r, e);
6076
- this.be(t);
6076
+ this.Se(t);
6077
6077
  }
6078
6078
  }
6079
- be(e) {
6079
+ Se(e) {
6080
6080
  const t = Math.floor(e / 8), n = e % 8;
6081
6081
  this.bitmap[t] |= 1 << n;
6082
6082
  }
@@ -6703,7 +6703,7 @@ const wt = (() => {
6703
6703
  desc: "DESCENDING"
6704
6704
  };
6705
6705
  return e;
6706
- })(), St = (() => {
6706
+ })(), bt = (() => {
6707
6707
  const e = {
6708
6708
  "<": "LESS_THAN",
6709
6709
  "<=": "LESS_THAN_OR_EQUAL",
@@ -6717,7 +6717,7 @@ const wt = (() => {
6717
6717
  "array-contains-any": "ARRAY_CONTAINS_ANY"
6718
6718
  };
6719
6719
  return e;
6720
- })(), bt = (() => {
6720
+ })(), St = (() => {
6721
6721
  const e = {
6722
6722
  and: "AND",
6723
6723
  or: "OR"
@@ -7322,11 +7322,11 @@ function __PRIVATE_toDirection(e) {
7322
7322
  }
7323
7323
 
7324
7324
  function __PRIVATE_toOperatorName(e) {
7325
- return St[e];
7325
+ return bt[e];
7326
7326
  }
7327
7327
 
7328
7328
  function __PRIVATE_toCompositeOperatorName(e) {
7329
- return bt[e];
7329
+ return St[e];
7330
7330
  }
7331
7331
 
7332
7332
  function __PRIVATE_toFieldPathReference(e) {
@@ -7737,7 +7737,7 @@ function __PRIVATE_toDbIndexState(e, t, n, r) {
7737
7737
  /**
7738
7738
  * Helper to get a typed SimpleDbStore for the namedQueries object store.
7739
7739
  */ function __PRIVATE_namedQueriesStore(e) {
7740
- return __PRIVATE_getStore(e, be);
7740
+ return __PRIVATE_getStore(e, Se);
7741
7741
  }
7742
7742
 
7743
7743
  /**
@@ -7766,7 +7766,7 @@ function __PRIVATE_toDbIndexState(e, t, n, r) {
7766
7766
  constructor(e, t) {
7767
7767
  this.serializer = e, this.userId = t;
7768
7768
  }
7769
- static St(e, t) {
7769
+ static bt(e, t) {
7770
7770
  const n = t.uid || "";
7771
7771
  return new __PRIVATE_IndexedDbDocumentOverlayCache(e, n);
7772
7772
  }
@@ -7783,7 +7783,7 @@ function __PRIVATE_toDbIndexState(e, t, n, r) {
7783
7783
  const r = [];
7784
7784
  return n.forEach(((n, i) => {
7785
7785
  const s = new Overlay(t, i);
7786
- r.push(this.bt(e, s));
7786
+ r.push(this.St(e, s));
7787
7787
  })), PersistencePromise.waitFor(r);
7788
7788
  }
7789
7789
  removeOverlaysForBatchId(e, t, n) {
@@ -7828,7 +7828,7 @@ function __PRIVATE_toDbIndexState(e, t, n, r) {
7828
7828
  i.size() < r || o.largestBatchId === s ? (i.set(o.getKey(), o), s = o.largestBatchId) : n.done();
7829
7829
  })).next((() => i));
7830
7830
  }
7831
- bt(e, t) {
7831
+ St(e, t) {
7832
7832
  return __PRIVATE_documentOverlayStore(e).put(function __PRIVATE_toDbDocumentOverlay(e, t, n) {
7833
7833
  const [r, i, s] = __PRIVATE_toDbDocumentOverlayKey(t, n.mutation.key);
7834
7834
  return {
@@ -8406,11 +8406,11 @@ class __PRIVATE_TargetIndexMatcher {
8406
8406
  return !1;
8407
8407
  // If there is an array element, find a matching filter.
8408
8408
  const t = __PRIVATE_fieldIndexGetArraySegment(e);
8409
- if (void 0 !== t && !this.Sn(t)) return !1;
8409
+ if (void 0 !== t && !this.bn(t)) return !1;
8410
8410
  const n = __PRIVATE_fieldIndexGetDirectionalSegments(e);
8411
8411
  let r = new Set, i = 0, s = 0;
8412
8412
  // Process all equalities first. Equalities can appear out of order.
8413
- for (;i < n.length && this.Sn(n[i]); ++i) r = r.add(n[i].fieldPath.canonicalString());
8413
+ for (;i < n.length && this.bn(n[i]); ++i) r = r.add(n[i].fieldPath.canonicalString());
8414
8414
  // If we already have processed all segments, all segments are used to serve
8415
8415
  // the equality filters and we do not need to map any segments to the
8416
8416
  // target's inequality and orderBy clauses.
@@ -8423,7 +8423,7 @@ class __PRIVATE_TargetIndexMatcher {
8423
8423
  // and the first orderBy clause.
8424
8424
  if (!r.has(e.field.canonicalString())) {
8425
8425
  const t = n[i];
8426
- if (!this.bn(e, t) || !this.Dn(this.gn[s++], t)) return !1;
8426
+ if (!this.Sn(e, t) || !this.Dn(this.gn[s++], t)) return !1;
8427
8427
  }
8428
8428
  ++i;
8429
8429
  }
@@ -8465,11 +8465,11 @@ class __PRIVATE_TargetIndexMatcher {
8465
8465
  n.field.isKeyField() || e.has(n.field) || (e = e.add(n.field), t.push(new IndexSegment(n.field, "asc" /* Direction.ASCENDING */ === n.dir ? 0 /* IndexKind.ASCENDING */ : 1 /* IndexKind.DESCENDING */)));
8466
8466
  return new FieldIndex(FieldIndex.UNKNOWN_ID, this.collectionId, t, IndexState.empty());
8467
8467
  }
8468
- Sn(e) {
8469
- for (const t of this.pn) if (this.bn(t, e)) return !0;
8468
+ bn(e) {
8469
+ for (const t of this.pn) if (this.Sn(t, e)) return !0;
8470
8470
  return !1;
8471
8471
  }
8472
- bn(e, t) {
8472
+ Sn(e, t) {
8473
8473
  if (void 0 === e || !e.field.isEqual(t.fieldPath)) return !1;
8474
8474
  const n = "array-contains" /* Operator.ARRAY_CONTAINS */ === e.op || "array-contains-any" /* Operator.ARRAY_CONTAINS_ANY */ === e.op;
8475
8475
  return 2 /* IndexKind.CONTAINS */ === t.kind === n;
@@ -9439,7 +9439,7 @@ class __PRIVATE_IndexedDbMutationQueue {
9439
9439
  * Creates a new mutation queue for the given user.
9440
9440
  * @param user - The user for which to create a mutation queue.
9441
9441
  * @param serializer - The serializer to use when persisting to IndexedDb.
9442
- */ static St(e, t, n, r) {
9442
+ */ static bt(e, t, n, r) {
9443
9443
  // TODO(mcg): Figure out what constraints there are on userIDs
9444
9444
  // In particular, are there any reserved characters? are empty ids allowed?
9445
9445
  // For the moment store these together in the same mutations table assuming
@@ -10123,10 +10123,10 @@ function __PRIVATE_newLruGarbageCollector(e, t) {
10123
10123
  this.db = e, this.garbageCollector = __PRIVATE_newLruGarbageCollector(this, t);
10124
10124
  }
10125
10125
  pr(e) {
10126
- const t = this.Sr(e);
10126
+ const t = this.br(e);
10127
10127
  return this.db.getTargetCache().getTargetCount(e).next((e => t.next((t => e + t))));
10128
10128
  }
10129
- Sr(e) {
10129
+ br(e) {
10130
10130
  let t = 0;
10131
10131
  return this.yr(e, (e => {
10132
10132
  t++;
@@ -10136,7 +10136,7 @@ function __PRIVATE_newLruGarbageCollector(e, t) {
10136
10136
  return this.db.getTargetCache().forEachTarget(e, t);
10137
10137
  }
10138
10138
  yr(e, t) {
10139
- return this.br(e, ((e, n) => t(n)));
10139
+ return this.Sr(e, ((e, n) => t(n)));
10140
10140
  }
10141
10141
  addReference(e, t, n) {
10142
10142
  return __PRIVATE_writeSentinelKey(e, n);
@@ -10165,7 +10165,7 @@ function __PRIVATE_newLruGarbageCollector(e, t) {
10165
10165
  removeOrphanedDocuments(e, t) {
10166
10166
  const n = this.db.getRemoteDocumentCache().newChangeBuffer(), r = [];
10167
10167
  let i = 0;
10168
- return this.br(e, ((s, o) => {
10168
+ return this.Sr(e, ((s, o) => {
10169
10169
  if (o <= t) {
10170
10170
  const t = this.Dr(e, s).next((t => {
10171
10171
  if (!t)
@@ -10196,7 +10196,7 @@ function __PRIVATE_newLruGarbageCollector(e, t) {
10196
10196
  * means not a part of any target, so the only entry in the target-document index for
10197
10197
  * that document will be the sentinel row (targetId 0), which will also have the sequence
10198
10198
  * number for the last time the document was accessed.
10199
- */ br(e, t) {
10199
+ */ Sr(e, t) {
10200
10200
  const n = __PRIVATE_documentTargetStore(e);
10201
10201
  let r, i = __PRIVATE_ListenSequence.le;
10202
10202
  return n.te({
@@ -11032,7 +11032,7 @@ class OverlayedDocument {
11032
11032
  }
11033
11033
  saveOverlays(e, t, n) {
11034
11034
  return n.forEach(((n, r) => {
11035
- this.bt(e, t, r);
11035
+ this.St(e, t, r);
11036
11036
  })), PersistencePromise.resolve();
11037
11037
  }
11038
11038
  removeOverlaysForBatchId(e, t, n) {
@@ -11067,7 +11067,7 @@ class OverlayedDocument {
11067
11067
  }
11068
11068
  return PersistencePromise.resolve(o);
11069
11069
  }
11070
- bt(e, t, n) {
11070
+ St(e, t, n) {
11071
11071
  // Remove the association of the overlay to its batch id.
11072
11072
  const r = this.overlays.get(n.key);
11073
11073
  if (null !== r) {
@@ -11780,10 +11780,10 @@ class __PRIVATE_MemoryLruDelegate {
11780
11780
  return this.persistence.getTargetCache().forEachTarget(e, t);
11781
11781
  }
11782
11782
  pr(e) {
11783
- const t = this.Sr(e);
11783
+ const t = this.br(e);
11784
11784
  return this.persistence.getTargetCache().getTargetCount(e).next((e => t.next((t => e + t))));
11785
11785
  }
11786
- Sr(e) {
11786
+ br(e) {
11787
11787
  let t = 0;
11788
11788
  return this.yr(e, (e => {
11789
11789
  t++;
@@ -11931,7 +11931,7 @@ class __PRIVATE_MemoryLruDelegate {
11931
11931
  }(e);
11932
11932
  }))), n < 5 && r >= 5 && (s = s.next((() => this.wi(i)))), n < 6 && r >= 6 && (s = s.next((() => (function __PRIVATE_createDocumentGlobalStore(e) {
11933
11933
  e.createObjectStore(le);
11934
- }(e), this.Si(i))))), n < 7 && r >= 7 && (s = s.next((() => this.bi(i)))), n < 8 && r >= 8 && (s = s.next((() => this.Di(e, i)))),
11934
+ }(e), this.bi(i))))), n < 7 && r >= 7 && (s = s.next((() => this.Si(i)))), n < 8 && r >= 8 && (s = s.next((() => this.Di(e, i)))),
11935
11935
  n < 9 && r >= 9 && (s = s.next((() => {
11936
11936
  // Multi-Tab used to manage its own changelog, but this has been moved
11937
11937
  // to the DbRemoteDocument object store itself. Since the previous change
@@ -11944,10 +11944,10 @@ class __PRIVATE_MemoryLruDelegate {
11944
11944
  }))), n < 10 && r >= 10 && (s = s.next((() => this.Ci(i)))), n < 11 && r >= 11 && (s = s.next((() => {
11945
11945
  !function __PRIVATE_createBundlesStore(e) {
11946
11946
  e.createObjectStore(we, {
11947
- keyPath: Se
11947
+ keyPath: be
11948
11948
  });
11949
11949
  }(e), function __PRIVATE_createNamedQueriesStore(e) {
11950
- e.createObjectStore(be, {
11950
+ e.createObjectStore(Se, {
11951
11951
  keyPath: De
11952
11952
  });
11953
11953
  }(e);
@@ -12006,7 +12006,7 @@ class __PRIVATE_MemoryLruDelegate {
12006
12006
  t.objectStore(Le).clear();
12007
12007
  }))), s;
12008
12008
  }
12009
- Si(e) {
12009
+ bi(e) {
12010
12010
  let t = 0;
12011
12011
  return e.store(j).te(((e, n) => {
12012
12012
  t += __PRIVATE_dbDocumentSize(n);
@@ -12033,7 +12033,7 @@ class __PRIVATE_MemoryLruDelegate {
12033
12033
  /**
12034
12034
  * Ensures that every document in the remote document cache has a corresponding sentinel row
12035
12035
  * with a sequence number. Missing rows are given the most recently used sequence number.
12036
- */ bi(e) {
12036
+ */ Si(e) {
12037
12037
  const t = e.store(Ee), n = e.store(j);
12038
12038
  return e.store(me).get(Ve).next((e => {
12039
12039
  const r = [];
@@ -12128,7 +12128,7 @@ class __PRIVATE_MemoryLruDelegate {
12128
12128
  __PRIVATE_fromDbMutationBatch(this.serializer, e).keys().forEach((e => r = r.add(e))),
12129
12129
  n.set(e.userId, r);
12130
12130
  })), PersistencePromise.forEach(n, ((e, n) => {
12131
- const s = new User(n), o = __PRIVATE_IndexedDbDocumentOverlayCache.St(this.serializer, s), _ = i.getIndexManager(s), a = __PRIVATE_IndexedDbMutationQueue.St(s, this.serializer, _, i.referenceDelegate);
12131
+ const s = new User(n), o = __PRIVATE_IndexedDbDocumentOverlayCache.bt(this.serializer, s), _ = i.getIndexManager(s), a = __PRIVATE_IndexedDbMutationQueue.bt(s, this.serializer, _, i.referenceDelegate);
12132
12132
  return new LocalDocumentsView(r, a, o, _).recalculateAndSaveOverlaysForDocumentKeys(new __PRIVATE_IndexedDbTransaction(t, __PRIVATE_ListenSequence.le), e).next();
12133
12133
  }));
12134
12134
  }));
@@ -12437,7 +12437,7 @@ class __PRIVATE_IndexedDbPersistence {
12437
12437
  return this.Pi;
12438
12438
  }
12439
12439
  getMutationQueue(e, t) {
12440
- return __PRIVATE_IndexedDbMutationQueue.St(e, this.serializer, t, this.referenceDelegate);
12440
+ return __PRIVATE_IndexedDbMutationQueue.bt(e, this.serializer, t, this.referenceDelegate);
12441
12441
  }
12442
12442
  getTargetCache() {
12443
12443
  return this.Ti;
@@ -12449,7 +12449,7 @@ class __PRIVATE_IndexedDbPersistence {
12449
12449
  return new __PRIVATE_IndexedDbIndexManager(e, this.serializer.wt.databaseId);
12450
12450
  }
12451
12451
  getDocumentOverlayCache(e) {
12452
- return __PRIVATE_IndexedDbDocumentOverlayCache.St(this.serializer, e);
12452
+ return __PRIVATE_IndexedDbDocumentOverlayCache.bt(this.serializer, e);
12453
12453
  }
12454
12454
  getBundleCache() {
12455
12455
  return this.Ei;
@@ -12769,13 +12769,13 @@ class __PRIVATE_QueryEngine {
12769
12769
  return this.ws(e, t).next((e => {
12770
12770
  i.result = e;
12771
12771
  })).next((() => {
12772
- if (!i.result) return this.Ss(e, t, r, n).next((e => {
12772
+ if (!i.result) return this.bs(e, t, r, n).next((e => {
12773
12773
  i.result = e;
12774
12774
  }));
12775
12775
  })).next((() => {
12776
12776
  if (i.result) return;
12777
12777
  const n = new QueryContext;
12778
- return this.bs(e, t, n).next((r => {
12778
+ return this.Ss(e, t, n).next((r => {
12779
12779
  if (i.result = r, this.fs) return this.Ds(e, t, n, r.size);
12780
12780
  }));
12781
12781
  })).next((() => i.result));
@@ -12816,7 +12816,7 @@ class __PRIVATE_QueryEngine {
12816
12816
  /**
12817
12817
  * Performs a query based on the target's persisted query mapping. Returns
12818
12818
  * `null` if the mapping is not available or cannot be used.
12819
- */ Ss(e, t, n, r) {
12819
+ */ bs(e, t, n, r) {
12820
12820
  return __PRIVATE_queryMatchesAllDocuments(t) || r.isEqual(SnapshotVersion.min()) ? PersistencePromise.resolve(null) : this.ys.getDocuments(e, n).next((i => {
12821
12821
  const s = this.vs(t, i);
12822
12822
  return this.Cs(t, s, n, r) ? PersistencePromise.resolve(null) : (__PRIVATE_getLogLevel() <= logger.LogLevel.DEBUG && __PRIVATE_logDebug("QueryEngine", "Re-using previous result from %s to execute query: %s", r.toString(), __PRIVATE_stringifyQuery(t)),
@@ -12863,7 +12863,7 @@ class __PRIVATE_QueryEngine {
12863
12863
  const i = "F" /* LimitType.First */ === e.limitType ? t.last() : t.first();
12864
12864
  return !!i && (i.hasPendingWrites || i.version.compareTo(r) > 0);
12865
12865
  }
12866
- bs(e, t, n) {
12866
+ Ss(e, t, n) {
12867
12867
  return __PRIVATE_getLogLevel() <= logger.LogLevel.DEBUG && __PRIVATE_logDebug("QueryEngine", "Using full collection scan to execute query:", __PRIVATE_stringifyQuery(t)),
12868
12868
  this.ys.getDocumentsMatchingQuery(e, t, IndexOffset.min(), n);
12869
12869
  }
@@ -13739,7 +13739,7 @@ class __PRIVATE_LocalClientState {
13739
13739
  } else if (this.oo.test(t.key)) {
13740
13740
  if (null !== t.newValue) {
13741
13741
  const e = this.wo(t.key, t.newValue);
13742
- if (e) return this.So(e);
13742
+ if (e) return this.bo(e);
13743
13743
  }
13744
13744
  } else if (t.key === this._o) {
13745
13745
  if (null !== t.newValue) {
@@ -13752,7 +13752,7 @@ class __PRIVATE_LocalClientState {
13752
13752
  if (null != e) try {
13753
13753
  const n = JSON.parse(e);
13754
13754
  __PRIVATE_hardAssert("number" == typeof n, 30636, {
13755
- bo: e
13755
+ So: e
13756
13756
  }), t = n;
13757
13757
  } catch (e) {
13758
13758
  __PRIVATE_logError(Gt, "Failed to read sequence number from WebStorage", e);
@@ -13843,7 +13843,7 @@ class __PRIVATE_LocalClientState {
13843
13843
  if (e.user.uid === this.currentUser.uid) return this.syncEngine.Co(e.batchId, e.state, e.error);
13844
13844
  __PRIVATE_logDebug(Gt, `Ignoring mutation for non-active user ${e.user.uid}`);
13845
13845
  }
13846
- So(e) {
13846
+ bo(e) {
13847
13847
  return this.syncEngine.Fo(e.targetId, e.state, e.error);
13848
13848
  }
13849
13849
  mo(e, t) {
@@ -14210,7 +14210,9 @@ class __PRIVATE_RestConnection {
14210
14210
 
14211
14211
  class __PRIVATE_WebChannelConnection extends __PRIVATE_RestConnection {
14212
14212
  constructor(e) {
14213
- super(e), this.forceLongPolling = e.forceLongPolling, this.autoDetectLongPolling = e.autoDetectLongPolling,
14213
+ super(e),
14214
+ /** A collection of open WebChannel instances */
14215
+ this.l_ = [], this.forceLongPolling = e.forceLongPolling, this.autoDetectLongPolling = e.autoDetectLongPolling,
14214
14216
  this.useFetchStreams = e.useFetchStreams, this.longPollingOptions = e.longPollingOptions;
14215
14217
  }
14216
14218
  Jo(e, t, n, r, i) {
@@ -14252,10 +14254,10 @@ class __PRIVATE_WebChannelConnection extends __PRIVATE_RestConnection {
14252
14254
 
14253
14255
  default:
14254
14256
  fail(9055, {
14255
- l_: e,
14257
+ h_: e,
14256
14258
  streamId: s,
14257
- h_: _.getLastErrorCode(),
14258
- P_: _.getLastError()
14259
+ P_: _.getLastErrorCode(),
14260
+ T_: _.getLastError()
14259
14261
  });
14260
14262
  }
14261
14263
  } finally {
@@ -14266,7 +14268,7 @@ class __PRIVATE_WebChannelConnection extends __PRIVATE_RestConnection {
14266
14268
  __PRIVATE_logDebug(Yt, `RPC '${e}' ${s} sending request:`, r), _.send(t, "POST", a, n, 15);
14267
14269
  }));
14268
14270
  }
14269
- T_(e, t, n) {
14271
+ I_(e, t, n) {
14270
14272
  const r = __PRIVATE_generateUniqueDebugId(), i = [ this.Ko, "/", "google.firestore.v1.Firestore", "/", e, "/channel" ], s = webchannelBlob.createWebChannelTransport(), o = webchannelBlob.getStatEventTarget(), _ = {
14271
14273
  // Required for backend stickiness, routing behavior is based on this
14272
14274
  // parameter.
@@ -14306,12 +14308,13 @@ class __PRIVATE_WebChannelConnection extends __PRIVATE_RestConnection {
14306
14308
  const u = i.join("");
14307
14309
  __PRIVATE_logDebug(Yt, `Creating RPC '${e}' stream ${r}: ${u}`, _);
14308
14310
  const c = s.createWebChannel(u, _);
14311
+ this.E_(c);
14309
14312
  // WebChannel supports sending the first message with the handshake - saving
14310
14313
  // a network round trip. However, it will have to call send in the same
14311
14314
  // JS event loop as open. In order to enforce this, we delay actually
14312
14315
  // opening the WebChannel until send is called. Whether we have called
14313
14316
  // open is tracked with this variable.
14314
- let l = !1, h = !1;
14317
+ let l = !1, h = !1;
14315
14318
  // A flag to determine whether the stream was closed (by us or through an
14316
14319
  // error/close event) to avoid delivering multiple close events or sending
14317
14320
  // on a closed stream
@@ -14343,7 +14346,7 @@ class __PRIVATE_WebChannelConnection extends __PRIVATE_RestConnection {
14343
14346
  h || (__PRIVATE_logDebug(Yt, `RPC '${e}' stream ${r} transport opened.`), P.__());
14344
14347
  })), __PRIVATE_unguardedEventListen(c, webchannelBlob.WebChannel.EventType.CLOSE, (() => {
14345
14348
  h || (h = !0, __PRIVATE_logDebug(Yt, `RPC '${e}' stream ${r} transport closed`),
14346
- P.u_());
14349
+ P.u_(), this.d_(c));
14347
14350
  })), __PRIVATE_unguardedEventListen(c, webchannelBlob.WebChannel.EventType.ERROR, (t => {
14348
14351
  h || (h = !0, __PRIVATE_logWarn(Yt, `RPC '${e}' stream ${r} transport errored. Name:`, t.name, "Message:", t.message),
14349
14352
  P.u_(new FirestoreError(N.UNAVAILABLE, "The operation could not be completed")));
@@ -14390,6 +14393,25 @@ class __PRIVATE_WebChannelConnection extends __PRIVATE_RestConnection {
14390
14393
  P.a_();
14391
14394
  }), 0), P;
14392
14395
  }
14396
+ /**
14397
+ * Closes and cleans up any resources associated with the connection.
14398
+ */ terminate() {
14399
+ // If the Firestore instance is terminated, we will explicitly
14400
+ // close any remaining open WebChannel instances.
14401
+ this.l_.forEach((e => e.close())), this.l_ = [];
14402
+ }
14403
+ /**
14404
+ * Add a WebChannel instance to the collection of open instances.
14405
+ * @param webChannel
14406
+ */ E_(e) {
14407
+ this.l_.push(e);
14408
+ }
14409
+ /**
14410
+ * Remove a WebChannel instance from the collection of open instances.
14411
+ * @param webChannel
14412
+ */ d_(e) {
14413
+ this.l_ = this.l_.filter((t => t === e));
14414
+ }
14393
14415
  }
14394
14416
 
14395
14417
  /**
@@ -14507,10 +14529,10 @@ class __PRIVATE_ExponentialBackoff {
14507
14529
  * Note that jitter will still be applied, so the actual delay could be as
14508
14530
  * much as 1.5*maxDelayMs.
14509
14531
  */ , i = 6e4) {
14510
- this.xi = e, this.timerId = t, this.I_ = n, this.E_ = r, this.d_ = i, this.A_ = 0,
14511
- this.R_ = null,
14532
+ this.xi = e, this.timerId = t, this.A_ = n, this.R_ = r, this.V_ = i, this.m_ = 0,
14533
+ this.f_ = null,
14512
14534
  /** The last backoff attempt, as epoch milliseconds. */
14513
- this.V_ = Date.now(), this.reset();
14535
+ this.g_ = Date.now(), this.reset();
14514
14536
  }
14515
14537
  /**
14516
14538
  * Resets the backoff delay.
@@ -14519,40 +14541,40 @@ class __PRIVATE_ExponentialBackoff {
14519
14541
  * (i.e. due to an error), initialDelayMs (plus jitter) will be used, and
14520
14542
  * subsequent ones will increase according to the backoffFactor.
14521
14543
  */ reset() {
14522
- this.A_ = 0;
14544
+ this.m_ = 0;
14523
14545
  }
14524
14546
  /**
14525
14547
  * Resets the backoff delay to the maximum delay (e.g. for use after a
14526
14548
  * RESOURCE_EXHAUSTED error).
14527
- */ m_() {
14528
- this.A_ = this.d_;
14549
+ */ p_() {
14550
+ this.m_ = this.V_;
14529
14551
  }
14530
14552
  /**
14531
14553
  * Returns a promise that resolves after currentDelayMs, and increases the
14532
14554
  * delay for any subsequent attempts. If there was a pending backoff operation
14533
14555
  * already, it will be canceled.
14534
- */ f_(e) {
14556
+ */ y_(e) {
14535
14557
  // Cancel any pending backoff operation.
14536
14558
  this.cancel();
14537
14559
  // First schedule using the current base (which may be 0 and should be
14538
14560
  // honored as such).
14539
- const t = Math.floor(this.A_ + this.g_()), n = Math.max(0, Date.now() - this.V_), r = Math.max(0, t - n);
14561
+ const t = Math.floor(this.m_ + this.w_()), n = Math.max(0, Date.now() - this.g_), r = Math.max(0, t - n);
14540
14562
  // Guard against lastAttemptTime being in the future due to a clock change.
14541
- r > 0 && __PRIVATE_logDebug("ExponentialBackoff", `Backing off for ${r} ms (base delay: ${this.A_} ms, delay with jitter: ${t} ms, last attempt: ${n} ms ago)`),
14542
- this.R_ = this.xi.enqueueAfterDelay(this.timerId, r, (() => (this.V_ = Date.now(),
14563
+ r > 0 && __PRIVATE_logDebug("ExponentialBackoff", `Backing off for ${r} ms (base delay: ${this.m_} ms, delay with jitter: ${t} ms, last attempt: ${n} ms ago)`),
14564
+ this.f_ = this.xi.enqueueAfterDelay(this.timerId, r, (() => (this.g_ = Date.now(),
14543
14565
  e()))),
14544
14566
  // Apply backoff factor to determine next delay and ensure it is within
14545
14567
  // bounds.
14546
- this.A_ *= this.E_, this.A_ < this.I_ && (this.A_ = this.I_), this.A_ > this.d_ && (this.A_ = this.d_);
14568
+ this.m_ *= this.R_, this.m_ < this.A_ && (this.m_ = this.A_), this.m_ > this.V_ && (this.m_ = this.V_);
14547
14569
  }
14548
- p_() {
14549
- null !== this.R_ && (this.R_.skipDelay(), this.R_ = null);
14570
+ b_() {
14571
+ null !== this.f_ && (this.f_.skipDelay(), this.f_ = null);
14550
14572
  }
14551
14573
  cancel() {
14552
- null !== this.R_ && (this.R_.cancel(), this.R_ = null);
14574
+ null !== this.f_ && (this.f_.cancel(), this.f_ = null);
14553
14575
  }
14554
- /** Returns a random value in the range [-currentBaseMs/2, currentBaseMs/2] */ g_() {
14555
- return (Math.random() - .5) * this.A_;
14576
+ /** Returns a random value in the range [-currentBaseMs/2, currentBaseMs/2] */ w_() {
14577
+ return (Math.random() - .5) * this.m_;
14556
14578
  }
14557
14579
  }
14558
14580
 
@@ -14608,18 +14630,18 @@ class __PRIVATE_ExponentialBackoff {
14608
14630
  */
14609
14631
  class __PRIVATE_PersistentStream {
14610
14632
  constructor(e, t, n, r, i, s, o, _) {
14611
- this.xi = e, this.y_ = n, this.w_ = r, this.connection = i, this.authCredentialsProvider = s,
14633
+ this.xi = e, this.S_ = n, this.D_ = r, this.connection = i, this.authCredentialsProvider = s,
14612
14634
  this.appCheckCredentialsProvider = o, this.listener = _, this.state = 0 /* PersistentStreamState.Initial */ ,
14613
14635
  /**
14614
14636
  * A close count that's incremented every time the stream is closed; used by
14615
14637
  * getCloseGuardedDispatcher() to invalidate callbacks that happen after
14616
14638
  * close.
14617
14639
  */
14618
- this.S_ = 0, this.b_ = null, this.D_ = null, this.stream = null,
14640
+ this.v_ = 0, this.C_ = null, this.F_ = null, this.stream = null,
14619
14641
  /**
14620
14642
  * Count of response messages received.
14621
14643
  */
14622
- this.v_ = 0, this.C_ = new __PRIVATE_ExponentialBackoff(e, t);
14644
+ this.M_ = 0, this.x_ = new __PRIVATE_ExponentialBackoff(e, t);
14623
14645
  }
14624
14646
  /**
14625
14647
  * Returns true if start() has been called and no error has occurred. True
@@ -14627,13 +14649,13 @@ class __PRIVATE_PersistentStream {
14627
14649
  * encompasses respecting backoff, getting auth tokens, and starting the
14628
14650
  * actual RPC). Use isOpen() to determine if the stream is open and ready for
14629
14651
  * outbound requests.
14630
- */ F_() {
14631
- return 1 /* PersistentStreamState.Starting */ === this.state || 5 /* PersistentStreamState.Backoff */ === this.state || this.M_();
14652
+ */ O_() {
14653
+ return 1 /* PersistentStreamState.Starting */ === this.state || 5 /* PersistentStreamState.Backoff */ === this.state || this.N_();
14632
14654
  }
14633
14655
  /**
14634
14656
  * Returns true if the underlying RPC is open (the onOpen() listener has been
14635
14657
  * called) and the stream is ready for outbound requests.
14636
- */ M_() {
14658
+ */ N_() {
14637
14659
  return 2 /* PersistentStreamState.Open */ === this.state || 3 /* PersistentStreamState.Healthy */ === this.state;
14638
14660
  }
14639
14661
  /**
@@ -14643,7 +14665,7 @@ class __PRIVATE_PersistentStream {
14643
14665
  *
14644
14666
  * When start returns, isStarted() will return true.
14645
14667
  */ start() {
14646
- this.v_ = 0, 4 /* PersistentStreamState.Error */ !== this.state ? this.auth() : this.x_();
14668
+ this.M_ = 0, 4 /* PersistentStreamState.Error */ !== this.state ? this.auth() : this.B_();
14647
14669
  }
14648
14670
  /**
14649
14671
  * Stops the RPC. This call is idempotent and allowed regardless of the
@@ -14651,7 +14673,7 @@ class __PRIVATE_PersistentStream {
14651
14673
  *
14652
14674
  * When stop returns, isStarted() and isOpen() will both return false.
14653
14675
  */ async stop() {
14654
- this.F_() && await this.close(0 /* PersistentStreamState.Initial */);
14676
+ this.O_() && await this.close(0 /* PersistentStreamState.Initial */);
14655
14677
  }
14656
14678
  /**
14657
14679
  * After an error the stream will usually back off on the next attempt to
@@ -14660,8 +14682,8 @@ class __PRIVATE_PersistentStream {
14660
14682
  *
14661
14683
  * Each error will call the onClose() listener. That function can decide to
14662
14684
  * inhibit backoff if required.
14663
- */ O_() {
14664
- this.state = 0 /* PersistentStreamState.Initial */ , this.C_.reset();
14685
+ */ L_() {
14686
+ this.state = 0 /* PersistentStreamState.Initial */ , this.x_.reset();
14665
14687
  }
14666
14688
  /**
14667
14689
  * Marks this stream as idle. If no further actions are performed on the
@@ -14672,25 +14694,25 @@ class __PRIVATE_PersistentStream {
14672
14694
  *
14673
14695
  * Only streams that are in state 'Open' can be marked idle, as all other
14674
14696
  * states imply pending network operations.
14675
- */ N_() {
14697
+ */ k_() {
14676
14698
  // Starts the idle time if we are in state 'Open' and are not yet already
14677
14699
  // running a timer (in which case the previous idle timeout still applies).
14678
- this.M_() && null === this.b_ && (this.b_ = this.xi.enqueueAfterDelay(this.y_, 6e4, (() => this.B_())));
14700
+ this.N_() && null === this.C_ && (this.C_ = this.xi.enqueueAfterDelay(this.S_, 6e4, (() => this.q_())));
14679
14701
  }
14680
- /** Sends a message to the underlying stream. */ L_(e) {
14681
- this.k_(), this.stream.send(e);
14702
+ /** Sends a message to the underlying stream. */ Q_(e) {
14703
+ this.U_(), this.stream.send(e);
14682
14704
  }
14683
- /** Called by the idle timer when the stream should close due to inactivity. */ async B_() {
14684
- if (this.M_())
14705
+ /** Called by the idle timer when the stream should close due to inactivity. */ async q_() {
14706
+ if (this.N_())
14685
14707
  // When timing out an idle stream there's no reason to force the stream into backoff when
14686
14708
  // it restarts so set the stream state to Initial instead of Error.
14687
14709
  return this.close(0 /* PersistentStreamState.Initial */);
14688
14710
  }
14689
- /** Marks the stream as active again. */ k_() {
14690
- this.b_ && (this.b_.cancel(), this.b_ = null);
14711
+ /** Marks the stream as active again. */ U_() {
14712
+ this.C_ && (this.C_.cancel(), this.C_ = null);
14691
14713
  }
14692
- /** Cancels the health check delayed operation. */ q_() {
14693
- this.D_ && (this.D_.cancel(), this.D_ = null);
14714
+ /** Cancels the health check delayed operation. */ K_() {
14715
+ this.F_ && (this.F_.cancel(), this.F_ = null);
14694
14716
  }
14695
14717
  /**
14696
14718
  * Closes the stream and cleans up as necessary:
@@ -14706,15 +14728,15 @@ class __PRIVATE_PersistentStream {
14706
14728
  * @param error - the error the connection was closed with.
14707
14729
  */ async close(e, t) {
14708
14730
  // Cancel any outstanding timers (they're guaranteed not to execute).
14709
- this.k_(), this.q_(), this.C_.cancel(),
14731
+ this.U_(), this.K_(), this.x_.cancel(),
14710
14732
  // Invalidates any stream-related callbacks (e.g. from auth or the
14711
14733
  // underlying stream), guaranteeing they won't execute.
14712
- this.S_++, 4 /* PersistentStreamState.Error */ !== e ?
14734
+ this.v_++, 4 /* PersistentStreamState.Error */ !== e ?
14713
14735
  // If this is an intentional close ensure we don't delay our next connection attempt.
14714
- this.C_.reset() : t && t.code === N.RESOURCE_EXHAUSTED ? (
14736
+ this.x_.reset() : t && t.code === N.RESOURCE_EXHAUSTED ? (
14715
14737
  // Log the error. (Probably either 'quota exceeded' or 'max queue length reached'.)
14716
14738
  __PRIVATE_logError(t.toString()), __PRIVATE_logError("Using maximum backoff delay to prevent overloading the backend."),
14717
- this.C_.m_()) : t && t.code === N.UNAUTHENTICATED && 3 /* PersistentStreamState.Healthy */ !== this.state && (
14739
+ this.x_.p_()) : t && t.code === N.UNAUTHENTICATED && 3 /* PersistentStreamState.Healthy */ !== this.state && (
14718
14740
  // "unauthenticated" error means the token was rejected. This should rarely
14719
14741
  // happen since both Auth and AppCheck ensure a sufficient TTL when we
14720
14742
  // request a token. If a user manually resets their system clock this can
@@ -14723,7 +14745,7 @@ class __PRIVATE_PersistentStream {
14723
14745
  // to ensure that we fetch a new token.
14724
14746
  this.authCredentialsProvider.invalidateToken(), this.appCheckCredentialsProvider.invalidateToken()),
14725
14747
  // Clean up the underlying stream because we are no longer interested in events.
14726
- null !== this.stream && (this.Q_(), this.stream.close(), this.stream = null),
14748
+ null !== this.stream && (this.W_(), this.stream.close(), this.stream = null),
14727
14749
  // This state must be assigned before calling onClose() to allow the callback to
14728
14750
  // inhibit backoff or otherwise manipulate the state in its non-started state.
14729
14751
  this.state = e,
@@ -14733,48 +14755,48 @@ class __PRIVATE_PersistentStream {
14733
14755
  /**
14734
14756
  * Can be overridden to perform additional cleanup before the stream is closed.
14735
14757
  * Calling super.tearDown() is not required.
14736
- */ Q_() {}
14758
+ */ W_() {}
14737
14759
  auth() {
14738
14760
  this.state = 1 /* PersistentStreamState.Starting */;
14739
- const e = this.U_(this.S_), t = this.S_;
14761
+ const e = this.G_(this.v_), t = this.v_;
14740
14762
  // TODO(mikelehen): Just use dispatchIfNotClosed, but see TODO below.
14741
14763
  Promise.all([ this.authCredentialsProvider.getToken(), this.appCheckCredentialsProvider.getToken() ]).then((([e, n]) => {
14742
14764
  // Stream can be stopped while waiting for authentication.
14743
14765
  // TODO(mikelehen): We really should just use dispatchIfNotClosed
14744
14766
  // and let this dispatch onto the queue, but that opened a spec test can
14745
14767
  // of worms that I don't want to deal with in this PR.
14746
- this.S_ === t &&
14768
+ this.v_ === t &&
14747
14769
  // Normally we'd have to schedule the callback on the AsyncQueue.
14748
14770
  // However, the following calls are safe to be called outside the
14749
14771
  // AsyncQueue since they don't chain asynchronous calls
14750
- this.K_(e, n);
14772
+ this.z_(e, n);
14751
14773
  }), (t => {
14752
14774
  e((() => {
14753
14775
  const e = new FirestoreError(N.UNKNOWN, "Fetching auth token failed: " + t.message);
14754
- return this.W_(e);
14776
+ return this.j_(e);
14755
14777
  }));
14756
14778
  }));
14757
14779
  }
14758
- K_(e, t) {
14759
- const n = this.U_(this.S_);
14760
- this.stream = this.G_(e, t), this.stream.e_((() => {
14780
+ z_(e, t) {
14781
+ const n = this.G_(this.v_);
14782
+ this.stream = this.H_(e, t), this.stream.e_((() => {
14761
14783
  n((() => this.listener.e_()));
14762
14784
  })), this.stream.n_((() => {
14763
- n((() => (this.state = 2 /* PersistentStreamState.Open */ , this.D_ = this.xi.enqueueAfterDelay(this.w_, 1e4, (() => (this.M_() && (this.state = 3 /* PersistentStreamState.Healthy */),
14785
+ n((() => (this.state = 2 /* PersistentStreamState.Open */ , this.F_ = this.xi.enqueueAfterDelay(this.D_, 1e4, (() => (this.N_() && (this.state = 3 /* PersistentStreamState.Healthy */),
14764
14786
  Promise.resolve()))), this.listener.n_())));
14765
14787
  })), this.stream.i_((e => {
14766
- n((() => this.W_(e)));
14788
+ n((() => this.j_(e)));
14767
14789
  })), this.stream.onMessage((e => {
14768
- n((() => 1 == ++this.v_ ? this.z_(e) : this.onNext(e)));
14790
+ n((() => 1 == ++this.M_ ? this.J_(e) : this.onNext(e)));
14769
14791
  }));
14770
14792
  }
14771
- x_() {
14772
- this.state = 5 /* PersistentStreamState.Backoff */ , this.C_.f_((async () => {
14793
+ B_() {
14794
+ this.state = 5 /* PersistentStreamState.Backoff */ , this.x_.y_((async () => {
14773
14795
  this.state = 0 /* PersistentStreamState.Initial */ , this.start();
14774
14796
  }));
14775
14797
  }
14776
14798
  // Visible for tests
14777
- W_(e) {
14799
+ j_(e) {
14778
14800
  // In theory the stream could close cleanly, however, in our current model
14779
14801
  // we never expect this to happen because if we stop a stream ourselves,
14780
14802
  // this callback will never be called. To prevent cases where we retry
@@ -14786,9 +14808,9 @@ class __PRIVATE_PersistentStream {
14786
14808
  * AsyncQueue but only runs them if closeCount remains unchanged. This allows
14787
14809
  * us to turn auth / stream callbacks into no-ops if the stream is closed /
14788
14810
  * re-opened, etc.
14789
- */ U_(e) {
14811
+ */ G_(e) {
14790
14812
  return t => {
14791
- this.xi.enqueueAndForget((() => this.S_ === e ? t() : (__PRIVATE_logDebug(Zt, "stream callback skipped by getCloseGuardedDispatcher."),
14813
+ this.xi.enqueueAndForget((() => this.v_ === e ? t() : (__PRIVATE_logDebug(Zt, "stream callback skipped by getCloseGuardedDispatcher."),
14792
14814
  Promise.resolve())));
14793
14815
  };
14794
14816
  }
@@ -14805,15 +14827,15 @@ class __PRIVATE_PersistentStream {
14805
14827
  super(e, "listen_stream_connection_backoff" /* TimerId.ListenStreamConnectionBackoff */ , "listen_stream_idle" /* TimerId.ListenStreamIdle */ , "health_check_timeout" /* TimerId.HealthCheckTimeout */ , t, n, r, s),
14806
14828
  this.serializer = i;
14807
14829
  }
14808
- G_(e, t) {
14809
- return this.connection.T_("Listen", e, t);
14830
+ H_(e, t) {
14831
+ return this.connection.I_("Listen", e, t);
14810
14832
  }
14811
- z_(e) {
14833
+ J_(e) {
14812
14834
  return this.onNext(e);
14813
14835
  }
14814
14836
  onNext(e) {
14815
14837
  // A successful response means the stream is healthy
14816
- this.C_.reset();
14838
+ this.x_.reset();
14817
14839
  const t = __PRIVATE_fromWatchChange(this.serializer, e), n = function __PRIVATE_versionFromListenResponse(e) {
14818
14840
  // We have only reached a consistent snapshot for the entire stream if there
14819
14841
  // is a read_time set and it applies to all targets (i.e. the list of
@@ -14822,14 +14844,14 @@ class __PRIVATE_PersistentStream {
14822
14844
  const t = e.targetChange;
14823
14845
  return t.targetIds && t.targetIds.length ? SnapshotVersion.min() : t.readTime ? __PRIVATE_fromVersion(t.readTime) : SnapshotVersion.min();
14824
14846
  }(e);
14825
- return this.listener.j_(t, n);
14847
+ return this.listener.Y_(t, n);
14826
14848
  }
14827
14849
  /**
14828
14850
  * Registers interest in the results of the given target. If the target
14829
14851
  * includes a resumeToken it will be included in the request. Results that
14830
14852
  * affect the target will be streamed back as WatchChange messages that
14831
14853
  * reference the targetId.
14832
- */ H_(e) {
14854
+ */ Z_(e) {
14833
14855
  const t = {};
14834
14856
  t.database = __PRIVATE_getEncodedDatabaseId(this.serializer), t.addTarget = function __PRIVATE_toTarget(e, t) {
14835
14857
  let n;
@@ -14853,15 +14875,15 @@ class __PRIVATE_PersistentStream {
14853
14875
  return n;
14854
14876
  }(this.serializer, e);
14855
14877
  const n = __PRIVATE_toListenRequestLabels(this.serializer, e);
14856
- n && (t.labels = n), this.L_(t);
14878
+ n && (t.labels = n), this.Q_(t);
14857
14879
  }
14858
14880
  /**
14859
14881
  * Unregisters interest in the results of the target associated with the
14860
14882
  * given targetId.
14861
- */ J_(e) {
14883
+ */ X_(e) {
14862
14884
  const t = {};
14863
14885
  t.database = __PRIVATE_getEncodedDatabaseId(this.serializer), t.removeTarget = e,
14864
- this.L_(t);
14886
+ this.Q_(t);
14865
14887
  }
14866
14888
  }
14867
14889
 
@@ -14889,24 +14911,24 @@ class __PRIVATE_PersistentStream {
14889
14911
  /**
14890
14912
  * Tracks whether or not a handshake has been successfully exchanged and
14891
14913
  * the stream is ready to accept mutations.
14892
- */ get Y_() {
14893
- return this.v_ > 0;
14914
+ */ get ea() {
14915
+ return this.M_ > 0;
14894
14916
  }
14895
14917
  // Override of PersistentStream.start
14896
14918
  start() {
14897
14919
  this.lastStreamToken = void 0, super.start();
14898
14920
  }
14899
- Q_() {
14900
- this.Y_ && this.Z_([]);
14921
+ W_() {
14922
+ this.ea && this.ta([]);
14901
14923
  }
14902
- G_(e, t) {
14903
- return this.connection.T_("Write", e, t);
14924
+ H_(e, t) {
14925
+ return this.connection.I_("Write", e, t);
14904
14926
  }
14905
- z_(e) {
14927
+ J_(e) {
14906
14928
  // Always capture the last stream token.
14907
14929
  return __PRIVATE_hardAssert(!!e.streamToken, 31322), this.lastStreamToken = e.streamToken,
14908
14930
  // The first response is always the handshake response
14909
- __PRIVATE_hardAssert(!e.writeResults || 0 === e.writeResults.length, 55816), this.listener.X_();
14931
+ __PRIVATE_hardAssert(!e.writeResults || 0 === e.writeResults.length, 55816), this.listener.na();
14910
14932
  }
14911
14933
  onNext(e) {
14912
14934
  // Always capture the last stream token.
@@ -14914,26 +14936,26 @@ class __PRIVATE_PersistentStream {
14914
14936
  // A successful first write response means the stream is healthy,
14915
14937
  // Note, that we could consider a successful handshake healthy, however,
14916
14938
  // the write itself might be causing an error we want to back off from.
14917
- this.C_.reset();
14939
+ this.x_.reset();
14918
14940
  const t = __PRIVATE_fromWriteResults(e.writeResults, e.commitTime), n = __PRIVATE_fromVersion(e.commitTime);
14919
- return this.listener.ea(n, t);
14941
+ return this.listener.ra(n, t);
14920
14942
  }
14921
14943
  /**
14922
14944
  * Sends an initial streamToken to the server, performing the handshake
14923
14945
  * required to make the StreamingWrite RPC work. Subsequent
14924
14946
  * calls should wait until onHandshakeComplete was called.
14925
- */ ta() {
14947
+ */ ia() {
14926
14948
  // TODO(dimond): Support stream resumption. We intentionally do not set the
14927
14949
  // stream token on the handshake, ignoring any stream token we might have.
14928
14950
  const e = {};
14929
- e.database = __PRIVATE_getEncodedDatabaseId(this.serializer), this.L_(e);
14951
+ e.database = __PRIVATE_getEncodedDatabaseId(this.serializer), this.Q_(e);
14930
14952
  }
14931
- /** Sends a group of mutations to the Firestore backend to apply. */ Z_(e) {
14953
+ /** Sends a group of mutations to the Firestore backend to apply. */ ta(e) {
14932
14954
  const t = {
14933
14955
  streamToken: this.lastStreamToken,
14934
14956
  writes: e.map((e => toMutation(this.serializer, e)))
14935
14957
  };
14936
- this.L_(t);
14958
+ this.Q_(t);
14937
14959
  }
14938
14960
  }
14939
14961
 
@@ -14965,25 +14987,25 @@ class __PRIVATE_PersistentStream {
14965
14987
  */ class __PRIVATE_DatastoreImpl extends Datastore {
14966
14988
  constructor(e, t, n, r) {
14967
14989
  super(), this.authCredentials = e, this.appCheckCredentials = t, this.connection = n,
14968
- this.serializer = r, this.na = !1;
14990
+ this.serializer = r, this.sa = !1;
14969
14991
  }
14970
- ra() {
14971
- if (this.na) throw new FirestoreError(N.FAILED_PRECONDITION, "The client has already been terminated.");
14992
+ oa() {
14993
+ if (this.sa) throw new FirestoreError(N.FAILED_PRECONDITION, "The client has already been terminated.");
14972
14994
  }
14973
14995
  /** Invokes the provided RPC with auth and AppCheck tokens. */ zo(e, t, n, r) {
14974
- return this.ra(), Promise.all([ this.authCredentials.getToken(), this.appCheckCredentials.getToken() ]).then((([i, s]) => this.connection.zo(e, __PRIVATE_toResourcePath(t, n), r, i, s))).catch((e => {
14996
+ return this.oa(), Promise.all([ this.authCredentials.getToken(), this.appCheckCredentials.getToken() ]).then((([i, s]) => this.connection.zo(e, __PRIVATE_toResourcePath(t, n), r, i, s))).catch((e => {
14975
14997
  throw "FirebaseError" === e.name ? (e.code === N.UNAUTHENTICATED && (this.authCredentials.invalidateToken(),
14976
14998
  this.appCheckCredentials.invalidateToken()), e) : new FirestoreError(N.UNKNOWN, e.toString());
14977
14999
  }));
14978
15000
  }
14979
15001
  /** Invokes the provided RPC with streamed results with auth and AppCheck tokens. */ Yo(e, t, n, r, i) {
14980
- return this.ra(), Promise.all([ this.authCredentials.getToken(), this.appCheckCredentials.getToken() ]).then((([s, o]) => this.connection.Yo(e, __PRIVATE_toResourcePath(t, n), r, s, o, i))).catch((e => {
15002
+ return this.oa(), Promise.all([ this.authCredentials.getToken(), this.appCheckCredentials.getToken() ]).then((([s, o]) => this.connection.Yo(e, __PRIVATE_toResourcePath(t, n), r, s, o, i))).catch((e => {
14981
15003
  throw "FirebaseError" === e.name ? (e.code === N.UNAUTHENTICATED && (this.authCredentials.invalidateToken(),
14982
15004
  this.appCheckCredentials.invalidateToken()), e) : new FirestoreError(N.UNKNOWN, e.toString());
14983
15005
  }));
14984
15006
  }
14985
15007
  terminate() {
14986
- this.na = !0, this.connection.terminate();
15008
+ this.sa = !0, this.connection.terminate();
14987
15009
  }
14988
15010
  }
14989
15011
 
@@ -15010,19 +15032,19 @@ class __PRIVATE_OnlineStateTracker {
15010
15032
  * maximum defined by MAX_WATCH_STREAM_FAILURES, we'll set the OnlineState to
15011
15033
  * Offline.
15012
15034
  */
15013
- this.ia = 0,
15035
+ this._a = 0,
15014
15036
  /**
15015
15037
  * A timer that elapses after ONLINE_STATE_TIMEOUT_MS, at which point we
15016
15038
  * transition from OnlineState.Unknown to OnlineState.Offline without waiting
15017
15039
  * for the stream to actually fail (MAX_WATCH_STREAM_FAILURES times).
15018
15040
  */
15019
- this.sa = null,
15041
+ this.aa = null,
15020
15042
  /**
15021
15043
  * Whether the client should log a warning message if it fails to connect to
15022
15044
  * the backend (initially true, cleared after a successful stream, or if we've
15023
15045
  * logged the message already).
15024
15046
  */
15025
- this.oa = !0;
15047
+ this.ua = !0;
15026
15048
  }
15027
15049
  /**
15028
15050
  * Called by RemoteStore when a watch stream is started (including on each
@@ -15030,9 +15052,9 @@ class __PRIVATE_OnlineStateTracker {
15030
15052
  *
15031
15053
  * If this is the first attempt, it sets the OnlineState to Unknown and starts
15032
15054
  * the onlineStateTimer.
15033
- */ _a() {
15034
- 0 === this.ia && (this.aa("Unknown" /* OnlineState.Unknown */), this.sa = this.asyncQueue.enqueueAfterDelay("online_state_timeout" /* TimerId.OnlineStateTimeout */ , 1e4, (() => (this.sa = null,
15035
- this.ua("Backend didn't respond within 10 seconds."), this.aa("Offline" /* OnlineState.Offline */),
15055
+ */ ca() {
15056
+ 0 === this._a && (this.la("Unknown" /* OnlineState.Unknown */), this.aa = this.asyncQueue.enqueueAfterDelay("online_state_timeout" /* TimerId.OnlineStateTimeout */ , 1e4, (() => (this.aa = null,
15057
+ this.ha("Backend didn't respond within 10 seconds."), this.la("Offline" /* OnlineState.Offline */),
15036
15058
  Promise.resolve()))));
15037
15059
  }
15038
15060
  /**
@@ -15040,10 +15062,10 @@ class __PRIVATE_OnlineStateTracker {
15040
15062
  * failure. The first failure moves us to the 'Unknown' state. We then may
15041
15063
  * allow multiple failures (based on MAX_WATCH_STREAM_FAILURES) before we
15042
15064
  * actually transition to the 'Offline' state.
15043
- */ ca(e) {
15044
- "Online" /* OnlineState.Online */ === this.state ? this.aa("Unknown" /* OnlineState.Unknown */) : (this.ia++,
15045
- this.ia >= 1 && (this.la(), this.ua(`Connection failed 1 times. Most recent error: ${e.toString()}`),
15046
- this.aa("Offline" /* OnlineState.Offline */)));
15065
+ */ Pa(e) {
15066
+ "Online" /* OnlineState.Online */ === this.state ? this.la("Unknown" /* OnlineState.Unknown */) : (this._a++,
15067
+ this._a >= 1 && (this.Ta(), this.ha(`Connection failed 1 times. Most recent error: ${e.toString()}`),
15068
+ this.la("Offline" /* OnlineState.Offline */)));
15047
15069
  }
15048
15070
  /**
15049
15071
  * Explicitly sets the OnlineState to the specified state.
@@ -15052,20 +15074,20 @@ class __PRIVATE_OnlineStateTracker {
15052
15074
  * Offline heuristics, so must not be used in place of
15053
15075
  * handleWatchStreamStart() and handleWatchStreamFailure().
15054
15076
  */ set(e) {
15055
- this.la(), this.ia = 0, "Online" /* OnlineState.Online */ === e && (
15077
+ this.Ta(), this._a = 0, "Online" /* OnlineState.Online */ === e && (
15056
15078
  // We've connected to watch at least once. Don't warn the developer
15057
15079
  // about being offline going forward.
15058
- this.oa = !1), this.aa(e);
15080
+ this.ua = !1), this.la(e);
15059
15081
  }
15060
- aa(e) {
15082
+ la(e) {
15061
15083
  e !== this.state && (this.state = e, this.onlineStateHandler(e));
15062
15084
  }
15063
- ua(e) {
15085
+ ha(e) {
15064
15086
  const t = `Could not reach Cloud Firestore backend. ${e}\nThis typically indicates that your device does not have a healthy Internet connection at the moment. The client will operate in offline mode until it is able to successfully connect to the backend.`;
15065
- this.oa ? (__PRIVATE_logError(t), this.oa = !1) : __PRIVATE_logDebug("OnlineStateTracker", t);
15087
+ this.ua ? (__PRIVATE_logError(t), this.ua = !1) : __PRIVATE_logDebug("OnlineStateTracker", t);
15066
15088
  }
15067
- la() {
15068
- null !== this.sa && (this.sa.cancel(), this.sa = null);
15089
+ Ta() {
15090
+ null !== this.aa && (this.aa.cancel(), this.aa = null);
15069
15091
  }
15070
15092
  }
15071
15093
 
@@ -15113,7 +15135,7 @@ class __PRIVATE_RemoteStoreImpl {
15113
15135
  * purely based on order, and so we can just shift() writes from the front of
15114
15136
  * the writePipeline as we receive responses.
15115
15137
  */
15116
- this.ha = [],
15138
+ this.Ia = [],
15117
15139
  /**
15118
15140
  * A mapping of watched targets that the client cares about tracking and the
15119
15141
  * user has explicitly called a 'listen' for this target.
@@ -15123,12 +15145,12 @@ class __PRIVATE_RemoteStoreImpl {
15123
15145
  * to the server. The targets removed with unlistens are removed eagerly
15124
15146
  * without waiting for confirmation from the listen stream.
15125
15147
  */
15126
- this.Pa = new Map,
15148
+ this.Ea = new Map,
15127
15149
  /**
15128
15150
  * A set of reasons for why the RemoteStore may be offline. If empty, the
15129
15151
  * RemoteStore may start its network connections.
15130
15152
  */
15131
- this.Ta = new Set,
15153
+ this.da = new Set,
15132
15154
  /**
15133
15155
  * Event handlers that get called when the network is disabled or enabled.
15134
15156
  *
@@ -15136,7 +15158,7 @@ class __PRIVATE_RemoteStoreImpl {
15136
15158
  * underlying streams (to support tree-shakeable streams). On Android and iOS,
15137
15159
  * the streams are created during construction of RemoteStore.
15138
15160
  */
15139
- this.Ia = [], this.Ea = i, this.Ea.No((e => {
15161
+ this.Aa = [], this.Ra = i, this.Ra.No((e => {
15140
15162
  n.enqueueAndForget((async () => {
15141
15163
  // Porting Note: Unlike iOS, `restartNetwork()` is called even when the
15142
15164
  // network becomes unreachable as we don't have any other way to tear
@@ -15144,24 +15166,24 @@ class __PRIVATE_RemoteStoreImpl {
15144
15166
  __PRIVATE_canUseNetwork(this) && (__PRIVATE_logDebug(Xt, "Restarting streams for network reachability change."),
15145
15167
  await async function __PRIVATE_restartNetwork(e) {
15146
15168
  const t = __PRIVATE_debugCast(e);
15147
- t.Ta.add(4 /* OfflineCause.ConnectivityChange */), await __PRIVATE_disableNetworkInternal(t),
15148
- t.da.set("Unknown" /* OnlineState.Unknown */), t.Ta.delete(4 /* OfflineCause.ConnectivityChange */),
15169
+ t.da.add(4 /* OfflineCause.ConnectivityChange */), await __PRIVATE_disableNetworkInternal(t),
15170
+ t.Va.set("Unknown" /* OnlineState.Unknown */), t.da.delete(4 /* OfflineCause.ConnectivityChange */),
15149
15171
  await __PRIVATE_enableNetworkInternal(t);
15150
15172
  }(this));
15151
15173
  }));
15152
- })), this.da = new __PRIVATE_OnlineStateTracker(n, r);
15174
+ })), this.Va = new __PRIVATE_OnlineStateTracker(n, r);
15153
15175
  }
15154
15176
  }
15155
15177
 
15156
15178
  async function __PRIVATE_enableNetworkInternal(e) {
15157
- if (__PRIVATE_canUseNetwork(e)) for (const t of e.Ia) await t(/* enabled= */ !0);
15179
+ if (__PRIVATE_canUseNetwork(e)) for (const t of e.Aa) await t(/* enabled= */ !0);
15158
15180
  }
15159
15181
 
15160
15182
  /**
15161
15183
  * Temporarily disables the network. The network can be re-enabled using
15162
15184
  * enableNetwork().
15163
15185
  */ async function __PRIVATE_disableNetworkInternal(e) {
15164
- for (const t of e.Ia) await t(/* enabled= */ !1);
15186
+ for (const t of e.Aa) await t(/* enabled= */ !1);
15165
15187
  }
15166
15188
 
15167
15189
  /**
@@ -15170,11 +15192,11 @@ async function __PRIVATE_enableNetworkInternal(e) {
15170
15192
  */
15171
15193
  function __PRIVATE_remoteStoreListen(e, t) {
15172
15194
  const n = __PRIVATE_debugCast(e);
15173
- n.Pa.has(t.targetId) || (
15195
+ n.Ea.has(t.targetId) || (
15174
15196
  // Mark this as something the client is currently listening for.
15175
- n.Pa.set(t.targetId, t), __PRIVATE_shouldStartWatchStream(n) ?
15197
+ n.Ea.set(t.targetId, t), __PRIVATE_shouldStartWatchStream(n) ?
15176
15198
  // The listen will be sent in onWatchStreamOpen
15177
- __PRIVATE_startWatchStream(n) : __PRIVATE_ensureWatchStream(n).M_() && __PRIVATE_sendWatchRequest(n, t));
15199
+ __PRIVATE_startWatchStream(n) : __PRIVATE_ensureWatchStream(n).N_() && __PRIVATE_sendWatchRequest(n, t));
15178
15200
  }
15179
15201
 
15180
15202
  /**
@@ -15182,22 +15204,22 @@ function __PRIVATE_remoteStoreListen(e, t) {
15182
15204
  * not being listened to.
15183
15205
  */ function __PRIVATE_remoteStoreUnlisten(e, t) {
15184
15206
  const n = __PRIVATE_debugCast(e), r = __PRIVATE_ensureWatchStream(n);
15185
- n.Pa.delete(t), r.M_() && __PRIVATE_sendUnwatchRequest(n, t), 0 === n.Pa.size && (r.M_() ? r.N_() : __PRIVATE_canUseNetwork(n) &&
15207
+ n.Ea.delete(t), r.N_() && __PRIVATE_sendUnwatchRequest(n, t), 0 === n.Ea.size && (r.N_() ? r.k_() : __PRIVATE_canUseNetwork(n) &&
15186
15208
  // Revert to OnlineState.Unknown if the watch stream is not open and we
15187
15209
  // have no listeners, since without any listens to send we cannot
15188
15210
  // confirm if the stream is healthy and upgrade to OnlineState.Online.
15189
- n.da.set("Unknown" /* OnlineState.Unknown */));
15211
+ n.Va.set("Unknown" /* OnlineState.Unknown */));
15190
15212
  }
15191
15213
 
15192
15214
  /**
15193
15215
  * We need to increment the expected number of pending responses we're due
15194
15216
  * from watch so we wait for the ack to process any messages from this target.
15195
15217
  */ function __PRIVATE_sendWatchRequest(e, t) {
15196
- if (e.Aa.Ke(t.targetId), t.resumeToken.approximateByteSize() > 0 || t.snapshotVersion.compareTo(SnapshotVersion.min()) > 0) {
15218
+ if (e.ma.Ke(t.targetId), t.resumeToken.approximateByteSize() > 0 || t.snapshotVersion.compareTo(SnapshotVersion.min()) > 0) {
15197
15219
  const n = e.remoteSyncer.getRemoteKeysForTarget(t.targetId).size;
15198
15220
  t = t.withExpectedCount(n);
15199
15221
  }
15200
- __PRIVATE_ensureWatchStream(e).H_(t);
15222
+ __PRIVATE_ensureWatchStream(e).Z_(t);
15201
15223
  }
15202
15224
 
15203
15225
  /**
@@ -15205,39 +15227,39 @@ function __PRIVATE_remoteStoreListen(e, t) {
15205
15227
  * from watch so we wait for the removal on the server before we process any
15206
15228
  * messages from this target.
15207
15229
  */ function __PRIVATE_sendUnwatchRequest(e, t) {
15208
- e.Aa.Ke(t), __PRIVATE_ensureWatchStream(e).J_(t);
15230
+ e.ma.Ke(t), __PRIVATE_ensureWatchStream(e).X_(t);
15209
15231
  }
15210
15232
 
15211
15233
  function __PRIVATE_startWatchStream(e) {
15212
- e.Aa = new __PRIVATE_WatchChangeAggregator({
15234
+ e.ma = new __PRIVATE_WatchChangeAggregator({
15213
15235
  getRemoteKeysForTarget: t => e.remoteSyncer.getRemoteKeysForTarget(t),
15214
- Rt: t => e.Pa.get(t) || null,
15236
+ Rt: t => e.Ea.get(t) || null,
15215
15237
  Pt: () => e.datastore.serializer.databaseId
15216
- }), __PRIVATE_ensureWatchStream(e).start(), e.da._a();
15238
+ }), __PRIVATE_ensureWatchStream(e).start(), e.Va.ca();
15217
15239
  }
15218
15240
 
15219
15241
  /**
15220
15242
  * Returns whether the watch stream should be started because it's necessary
15221
15243
  * and has not yet been started.
15222
15244
  */ function __PRIVATE_shouldStartWatchStream(e) {
15223
- return __PRIVATE_canUseNetwork(e) && !__PRIVATE_ensureWatchStream(e).F_() && e.Pa.size > 0;
15245
+ return __PRIVATE_canUseNetwork(e) && !__PRIVATE_ensureWatchStream(e).O_() && e.Ea.size > 0;
15224
15246
  }
15225
15247
 
15226
15248
  function __PRIVATE_canUseNetwork(e) {
15227
- return 0 === __PRIVATE_debugCast(e).Ta.size;
15249
+ return 0 === __PRIVATE_debugCast(e).da.size;
15228
15250
  }
15229
15251
 
15230
15252
  function __PRIVATE_cleanUpWatchStreamState(e) {
15231
- e.Aa = void 0;
15253
+ e.ma = void 0;
15232
15254
  }
15233
15255
 
15234
15256
  async function __PRIVATE_onWatchStreamConnected(e) {
15235
15257
  // Mark the client as online since we got a "connected" notification.
15236
- e.da.set("Online" /* OnlineState.Online */);
15258
+ e.Va.set("Online" /* OnlineState.Online */);
15237
15259
  }
15238
15260
 
15239
15261
  async function __PRIVATE_onWatchStreamOpen(e) {
15240
- e.Pa.forEach(((t, n) => {
15262
+ e.Ea.forEach(((t, n) => {
15241
15263
  __PRIVATE_sendWatchRequest(e, t);
15242
15264
  }));
15243
15265
  }
@@ -15245,17 +15267,17 @@ async function __PRIVATE_onWatchStreamOpen(e) {
15245
15267
  async function __PRIVATE_onWatchStreamClose(e, t) {
15246
15268
  __PRIVATE_cleanUpWatchStreamState(e),
15247
15269
  // If we still need the watch stream, retry the connection.
15248
- __PRIVATE_shouldStartWatchStream(e) ? (e.da.ca(t), __PRIVATE_startWatchStream(e)) :
15270
+ __PRIVATE_shouldStartWatchStream(e) ? (e.Va.Pa(t), __PRIVATE_startWatchStream(e)) :
15249
15271
  // No need to restart watch stream because there are no active targets.
15250
15272
  // The online state is set to unknown because there is no active attempt
15251
15273
  // at establishing a connection
15252
- e.da.set("Unknown" /* OnlineState.Unknown */);
15274
+ e.Va.set("Unknown" /* OnlineState.Unknown */);
15253
15275
  }
15254
15276
 
15255
15277
  async function __PRIVATE_onWatchStreamChange(e, t, n) {
15256
15278
  if (
15257
15279
  // Mark the client as online since we got a message from the server
15258
- e.da.set("Online" /* OnlineState.Online */), t instanceof __PRIVATE_WatchTargetChange && 2 /* WatchTargetChangeState.Removed */ === t.state && t.cause)
15280
+ e.Va.set("Online" /* OnlineState.Online */), t instanceof __PRIVATE_WatchTargetChange && 2 /* WatchTargetChangeState.Removed */ === t.state && t.cause)
15259
15281
  // There was an error on a target, don't wait for a consistent snapshot
15260
15282
  // to raise events
15261
15283
  try {
@@ -15264,7 +15286,7 @@ async function __PRIVATE_onWatchStreamChange(e, t, n) {
15264
15286
  const n = t.cause;
15265
15287
  for (const r of t.targetIds)
15266
15288
  // A watched target might have been removed already.
15267
- e.Pa.has(r) && (await e.remoteSyncer.rejectListen(r, n), e.Pa.delete(r), e.Aa.removeTarget(r));
15289
+ e.Ea.has(r) && (await e.remoteSyncer.rejectListen(r, n), e.Ea.delete(r), e.ma.removeTarget(r));
15268
15290
  }
15269
15291
  /**
15270
15292
  * Attempts to fill our write pipeline with writes from the LocalStore.
@@ -15277,7 +15299,7 @@ async function __PRIVATE_onWatchStreamChange(e, t, n) {
15277
15299
  } catch (n) {
15278
15300
  __PRIVATE_logDebug(Xt, "Failed to remove targets %s: %s ", t.targetIds.join(","), n),
15279
15301
  await __PRIVATE_disableNetworkUntilRecovery(e, n);
15280
- } else if (t instanceof __PRIVATE_DocumentWatchChange ? e.Aa.Xe(t) : t instanceof __PRIVATE_ExistenceFilterChange ? e.Aa.ot(t) : e.Aa.nt(t),
15302
+ } else if (t instanceof __PRIVATE_DocumentWatchChange ? e.ma.Xe(t) : t instanceof __PRIVATE_ExistenceFilterChange ? e.ma.ot(t) : e.ma.nt(t),
15281
15303
  !n.isEqual(SnapshotVersion.min())) try {
15282
15304
  const t = await __PRIVATE_localStoreGetLastRemoteSnapshotVersion(e.localStore);
15283
15305
  n.compareTo(t) >= 0 &&
@@ -15289,26 +15311,26 @@ async function __PRIVATE_onWatchStreamChange(e, t, n) {
15289
15311
  * SyncEngine.
15290
15312
  */
15291
15313
  await function __PRIVATE_raiseWatchSnapshot(e, t) {
15292
- const n = e.Aa.It(t);
15314
+ const n = e.ma.It(t);
15293
15315
  // Update in-memory resume tokens. LocalStore will update the
15294
15316
  // persistent view of these when applying the completed RemoteEvent.
15295
15317
  return n.targetChanges.forEach(((n, r) => {
15296
15318
  if (n.resumeToken.approximateByteSize() > 0) {
15297
- const i = e.Pa.get(r);
15319
+ const i = e.Ea.get(r);
15298
15320
  // A watched target might have been removed already.
15299
- i && e.Pa.set(r, i.withResumeToken(n.resumeToken, t));
15321
+ i && e.Ea.set(r, i.withResumeToken(n.resumeToken, t));
15300
15322
  }
15301
15323
  })),
15302
15324
  // Re-establish listens for the targets that have been invalidated by
15303
15325
  // existence filter mismatches.
15304
15326
  n.targetMismatches.forEach(((t, n) => {
15305
- const r = e.Pa.get(t);
15327
+ const r = e.Ea.get(t);
15306
15328
  if (!r)
15307
15329
  // A watched target might have been removed already.
15308
15330
  return;
15309
15331
  // Clear the resume token for the target, since we're in a known mismatch
15310
15332
  // state.
15311
- e.Pa.set(t, r.withResumeToken(ByteString.EMPTY_BYTE_STRING, r.snapshotVersion)),
15333
+ e.Ea.set(t, r.withResumeToken(ByteString.EMPTY_BYTE_STRING, r.snapshotVersion)),
15312
15334
  // Cause a hard reset by unwatching and rewatching immediately, but
15313
15335
  // deliberately don't send a resume token so that we get a full update.
15314
15336
  __PRIVATE_sendUnwatchRequest(e, t);
@@ -15335,9 +15357,9 @@ async function __PRIVATE_onWatchStreamChange(e, t, n) {
15335
15357
  * any retry attempt.
15336
15358
  */ async function __PRIVATE_disableNetworkUntilRecovery(e, t, n) {
15337
15359
  if (!__PRIVATE_isIndexedDbTransactionError(t)) throw t;
15338
- e.Ta.add(1 /* OfflineCause.IndexedDbFailed */),
15360
+ e.da.add(1 /* OfflineCause.IndexedDbFailed */),
15339
15361
  // Disable network and raise offline snapshots
15340
- await __PRIVATE_disableNetworkInternal(e), e.da.set("Offline" /* OnlineState.Offline */),
15362
+ await __PRIVATE_disableNetworkInternal(e), e.Va.set("Offline" /* OnlineState.Offline */),
15341
15363
  n || (
15342
15364
  // Use a simple read operation to determine if IndexedDB recovered.
15343
15365
  // Ideally, we would expose a health check directly on SimpleDb, but
@@ -15345,7 +15367,7 @@ async function __PRIVATE_onWatchStreamChange(e, t, n) {
15345
15367
  n = () => __PRIVATE_localStoreGetLastRemoteSnapshotVersion(e.localStore)),
15346
15368
  // Probe IndexedDB periodically and re-enable network
15347
15369
  e.asyncQueue.enqueueRetryable((async () => {
15348
- __PRIVATE_logDebug(Xt, "Retrying IndexedDB access"), await n(), e.Ta.delete(1 /* OfflineCause.IndexedDbFailed */),
15370
+ __PRIVATE_logDebug(Xt, "Retrying IndexedDB access"), await n(), e.da.delete(1 /* OfflineCause.IndexedDbFailed */),
15349
15371
  await __PRIVATE_enableNetworkInternal(e);
15350
15372
  }));
15351
15373
  }
@@ -15359,11 +15381,11 @@ async function __PRIVATE_onWatchStreamChange(e, t, n) {
15359
15381
 
15360
15382
  async function __PRIVATE_fillWritePipeline(e) {
15361
15383
  const t = __PRIVATE_debugCast(e), n = __PRIVATE_ensureWriteStream(t);
15362
- let r = t.ha.length > 0 ? t.ha[t.ha.length - 1].batchId : G;
15384
+ let r = t.Ia.length > 0 ? t.Ia[t.Ia.length - 1].batchId : G;
15363
15385
  for (;__PRIVATE_canAddToWritePipeline(t); ) try {
15364
15386
  const e = await __PRIVATE_localStoreGetNextMutationBatch(t.localStore, r);
15365
15387
  if (null === e) {
15366
- 0 === t.ha.length && n.N_();
15388
+ 0 === t.Ia.length && n.k_();
15367
15389
  break;
15368
15390
  }
15369
15391
  r = e.batchId, __PRIVATE_addToWritePipeline(t, e);
@@ -15377,20 +15399,20 @@ async function __PRIVATE_fillWritePipeline(e) {
15377
15399
  * Returns true if we can add to the write pipeline (i.e. the network is
15378
15400
  * enabled and the write pipeline is not full).
15379
15401
  */ function __PRIVATE_canAddToWritePipeline(e) {
15380
- return __PRIVATE_canUseNetwork(e) && e.ha.length < 10;
15402
+ return __PRIVATE_canUseNetwork(e) && e.Ia.length < 10;
15381
15403
  }
15382
15404
 
15383
15405
  /**
15384
15406
  * Queues additional writes to be sent to the write stream, sending them
15385
15407
  * immediately if the write stream is established.
15386
15408
  */ function __PRIVATE_addToWritePipeline(e, t) {
15387
- e.ha.push(t);
15409
+ e.Ia.push(t);
15388
15410
  const n = __PRIVATE_ensureWriteStream(e);
15389
- n.M_() && n.Y_ && n.Z_(t.mutations);
15411
+ n.N_() && n.ea && n.ta(t.mutations);
15390
15412
  }
15391
15413
 
15392
15414
  function __PRIVATE_shouldStartWriteStream(e) {
15393
- return __PRIVATE_canUseNetwork(e) && !__PRIVATE_ensureWriteStream(e).F_() && e.ha.length > 0;
15415
+ return __PRIVATE_canUseNetwork(e) && !__PRIVATE_ensureWriteStream(e).O_() && e.Ia.length > 0;
15394
15416
  }
15395
15417
 
15396
15418
  function __PRIVATE_startWriteStream(e) {
@@ -15398,17 +15420,17 @@ function __PRIVATE_startWriteStream(e) {
15398
15420
  }
15399
15421
 
15400
15422
  async function __PRIVATE_onWriteStreamOpen(e) {
15401
- __PRIVATE_ensureWriteStream(e).ta();
15423
+ __PRIVATE_ensureWriteStream(e).ia();
15402
15424
  }
15403
15425
 
15404
15426
  async function __PRIVATE_onWriteHandshakeComplete(e) {
15405
15427
  const t = __PRIVATE_ensureWriteStream(e);
15406
15428
  // Send the write pipeline now that the stream is established.
15407
- for (const n of e.ha) t.Z_(n.mutations);
15429
+ for (const n of e.Ia) t.ta(n.mutations);
15408
15430
  }
15409
15431
 
15410
15432
  async function __PRIVATE_onMutationResult(e, t, n) {
15411
- const r = e.ha.shift(), i = MutationBatchResult.from(r, t, n);
15433
+ const r = e.Ia.shift(), i = MutationBatchResult.from(r, t, n);
15412
15434
  await __PRIVATE_executeWithRecovery(e, (() => e.remoteSyncer.applySuccessfulWrite(i))),
15413
15435
  // It's possible that with the completion of this mutation another
15414
15436
  // slot has freed up.
@@ -15418,7 +15440,7 @@ async function __PRIVATE_onMutationResult(e, t, n) {
15418
15440
  async function __PRIVATE_onWriteStreamClose(e, t) {
15419
15441
  // If the write stream closed after the write handshake completes, a write
15420
15442
  // operation failed and we fail the pending operation.
15421
- t && __PRIVATE_ensureWriteStream(e).Y_ &&
15443
+ t && __PRIVATE_ensureWriteStream(e).ea &&
15422
15444
  // This error affects the actual write.
15423
15445
  await async function __PRIVATE_handleWriteError(e, t) {
15424
15446
  // Only handle permanent errors here. If it's transient, just let the retry
@@ -15428,11 +15450,11 @@ async function __PRIVATE_onWriteStreamClose(e, t) {
15428
15450
  }(t.code)) {
15429
15451
  // This was a permanent error, the request itself was the problem
15430
15452
  // so it's not going to succeed if we resend it.
15431
- const n = e.ha.shift();
15453
+ const n = e.Ia.shift();
15432
15454
  // In this case it's also unlikely that the server itself is melting
15433
15455
  // down -- this was just a bad request so inhibit backoff on the next
15434
15456
  // restart.
15435
- __PRIVATE_ensureWriteStream(e).O_(), await __PRIVATE_executeWithRecovery(e, (() => e.remoteSyncer.rejectFailedWrite(n.batchId, t))),
15457
+ __PRIVATE_ensureWriteStream(e).L_(), await __PRIVATE_executeWithRecovery(e, (() => e.remoteSyncer.rejectFailedWrite(n.batchId, t))),
15436
15458
  // It's possible that with the completion of this mutation
15437
15459
  // another slot has freed up.
15438
15460
  await __PRIVATE_fillWritePipeline(e);
@@ -15450,19 +15472,19 @@ async function __PRIVATE_remoteStoreHandleCredentialChange(e, t) {
15450
15472
  // Tear down and re-create our network streams. This will ensure we get a
15451
15473
  // fresh auth token for the new user and re-fill the write pipeline with
15452
15474
  // new mutations from the LocalStore (since mutations are per-user).
15453
- n.Ta.add(3 /* OfflineCause.CredentialChange */), await __PRIVATE_disableNetworkInternal(n),
15475
+ n.da.add(3 /* OfflineCause.CredentialChange */), await __PRIVATE_disableNetworkInternal(n),
15454
15476
  r &&
15455
15477
  // Don't set the network status to Unknown if we are offline.
15456
- n.da.set("Unknown" /* OnlineState.Unknown */), await n.remoteSyncer.handleCredentialChange(t),
15457
- n.Ta.delete(3 /* OfflineCause.CredentialChange */), await __PRIVATE_enableNetworkInternal(n);
15478
+ n.Va.set("Unknown" /* OnlineState.Unknown */), await n.remoteSyncer.handleCredentialChange(t),
15479
+ n.da.delete(3 /* OfflineCause.CredentialChange */), await __PRIVATE_enableNetworkInternal(n);
15458
15480
  }
15459
15481
 
15460
15482
  /**
15461
15483
  * Toggles the network state when the client gains or loses its primary lease.
15462
15484
  */ async function __PRIVATE_remoteStoreApplyPrimaryState(e, t) {
15463
15485
  const n = __PRIVATE_debugCast(e);
15464
- t ? (n.Ta.delete(2 /* OfflineCause.IsSecondary */), await __PRIVATE_enableNetworkInternal(n)) : t || (n.Ta.add(2 /* OfflineCause.IsSecondary */),
15465
- await __PRIVATE_disableNetworkInternal(n), n.da.set("Unknown" /* OnlineState.Unknown */));
15486
+ t ? (n.da.delete(2 /* OfflineCause.IsSecondary */), await __PRIVATE_enableNetworkInternal(n)) : t || (n.da.add(2 /* OfflineCause.IsSecondary */),
15487
+ await __PRIVATE_disableNetworkInternal(n), n.Va.set("Unknown" /* OnlineState.Unknown */));
15466
15488
  }
15467
15489
 
15468
15490
  /**
@@ -15473,11 +15495,11 @@ async function __PRIVATE_remoteStoreHandleCredentialChange(e, t) {
15473
15495
  * PORTING NOTE: On iOS and Android, the WatchStream gets registered on startup.
15474
15496
  * This is not done on Web to allow it to be tree-shaken.
15475
15497
  */ function __PRIVATE_ensureWatchStream(e) {
15476
- return e.Ra || (
15498
+ return e.fa || (
15477
15499
  // Create stream (but note that it is not started yet).
15478
- e.Ra = function __PRIVATE_newPersistentWatchStream(e, t, n) {
15500
+ e.fa = function __PRIVATE_newPersistentWatchStream(e, t, n) {
15479
15501
  const r = __PRIVATE_debugCast(e);
15480
- return r.ra(), new __PRIVATE_PersistentListenStream(t, r.connection, r.authCredentials, r.appCheckCredentials, r.serializer, n);
15502
+ return r.oa(), new __PRIVATE_PersistentListenStream(t, r.connection, r.authCredentials, r.appCheckCredentials, r.serializer, n);
15481
15503
  }
15482
15504
  /**
15483
15505
  * @license
@@ -15498,11 +15520,11 @@ async function __PRIVATE_remoteStoreHandleCredentialChange(e, t) {
15498
15520
  e_: __PRIVATE_onWatchStreamConnected.bind(null, e),
15499
15521
  n_: __PRIVATE_onWatchStreamOpen.bind(null, e),
15500
15522
  i_: __PRIVATE_onWatchStreamClose.bind(null, e),
15501
- j_: __PRIVATE_onWatchStreamChange.bind(null, e)
15502
- }), e.Ia.push((async t => {
15503
- t ? (e.Ra.O_(), __PRIVATE_shouldStartWatchStream(e) ? __PRIVATE_startWatchStream(e) : e.da.set("Unknown" /* OnlineState.Unknown */)) : (await e.Ra.stop(),
15523
+ Y_: __PRIVATE_onWatchStreamChange.bind(null, e)
15524
+ }), e.Aa.push((async t => {
15525
+ t ? (e.fa.L_(), __PRIVATE_shouldStartWatchStream(e) ? __PRIVATE_startWatchStream(e) : e.Va.set("Unknown" /* OnlineState.Unknown */)) : (await e.fa.stop(),
15504
15526
  __PRIVATE_cleanUpWatchStreamState(e));
15505
- }))), e.Ra;
15527
+ }))), e.fa;
15506
15528
  }
15507
15529
 
15508
15530
  /**
@@ -15513,23 +15535,23 @@ async function __PRIVATE_remoteStoreHandleCredentialChange(e, t) {
15513
15535
  * PORTING NOTE: On iOS and Android, the WriteStream gets registered on startup.
15514
15536
  * This is not done on Web to allow it to be tree-shaken.
15515
15537
  */ function __PRIVATE_ensureWriteStream(e) {
15516
- return e.Va || (
15538
+ return e.ga || (
15517
15539
  // Create stream (but note that it is not started yet).
15518
- e.Va = function __PRIVATE_newPersistentWriteStream(e, t, n) {
15540
+ e.ga = function __PRIVATE_newPersistentWriteStream(e, t, n) {
15519
15541
  const r = __PRIVATE_debugCast(e);
15520
- return r.ra(), new __PRIVATE_PersistentWriteStream(t, r.connection, r.authCredentials, r.appCheckCredentials, r.serializer, n);
15542
+ return r.oa(), new __PRIVATE_PersistentWriteStream(t, r.connection, r.authCredentials, r.appCheckCredentials, r.serializer, n);
15521
15543
  }(e.datastore, e.asyncQueue, {
15522
15544
  e_: () => Promise.resolve(),
15523
15545
  n_: __PRIVATE_onWriteStreamOpen.bind(null, e),
15524
15546
  i_: __PRIVATE_onWriteStreamClose.bind(null, e),
15525
- X_: __PRIVATE_onWriteHandshakeComplete.bind(null, e),
15526
- ea: __PRIVATE_onMutationResult.bind(null, e)
15527
- }), e.Ia.push((async t => {
15528
- t ? (e.Va.O_(),
15547
+ na: __PRIVATE_onWriteHandshakeComplete.bind(null, e),
15548
+ ra: __PRIVATE_onMutationResult.bind(null, e)
15549
+ }), e.Aa.push((async t => {
15550
+ t ? (e.ga.L_(),
15529
15551
  // This will start the write stream if necessary.
15530
- await __PRIVATE_fillWritePipeline(e)) : (await e.Va.stop(), e.ha.length > 0 && (__PRIVATE_logDebug(Xt, `Stopping write stream with ${e.ha.length} pending writes`),
15531
- e.ha = []));
15532
- }))), e.Va;
15552
+ await __PRIVATE_fillWritePipeline(e)) : (await e.ga.stop(), e.Ia.length > 0 && (__PRIVATE_logDebug(Xt, `Stopping write stream with ${e.Ia.length} pending writes`),
15553
+ e.Ia = []));
15554
+ }))), e.ga;
15533
15555
  }
15534
15556
 
15535
15557
  /**
@@ -15742,25 +15764,25 @@ class DelayedOperation {
15742
15764
  * duplicate events for the same doc.
15743
15765
  */ class __PRIVATE_DocumentChangeSet {
15744
15766
  constructor() {
15745
- this.ma = new SortedMap(DocumentKey.comparator);
15767
+ this.pa = new SortedMap(DocumentKey.comparator);
15746
15768
  }
15747
15769
  track(e) {
15748
- const t = e.doc.key, n = this.ma.get(t);
15770
+ const t = e.doc.key, n = this.pa.get(t);
15749
15771
  n ?
15750
15772
  // Merge the new change with the existing change.
15751
- 0 /* ChangeType.Added */ !== e.type && 3 /* ChangeType.Metadata */ === n.type ? this.ma = this.ma.insert(t, e) : 3 /* ChangeType.Metadata */ === e.type && 1 /* ChangeType.Removed */ !== n.type ? this.ma = this.ma.insert(t, {
15773
+ 0 /* ChangeType.Added */ !== e.type && 3 /* ChangeType.Metadata */ === n.type ? this.pa = this.pa.insert(t, e) : 3 /* ChangeType.Metadata */ === e.type && 1 /* ChangeType.Removed */ !== n.type ? this.pa = this.pa.insert(t, {
15752
15774
  type: n.type,
15753
15775
  doc: e.doc
15754
- }) : 2 /* ChangeType.Modified */ === e.type && 2 /* ChangeType.Modified */ === n.type ? this.ma = this.ma.insert(t, {
15776
+ }) : 2 /* ChangeType.Modified */ === e.type && 2 /* ChangeType.Modified */ === n.type ? this.pa = this.pa.insert(t, {
15755
15777
  type: 2 /* ChangeType.Modified */ ,
15756
15778
  doc: e.doc
15757
- }) : 2 /* ChangeType.Modified */ === e.type && 0 /* ChangeType.Added */ === n.type ? this.ma = this.ma.insert(t, {
15779
+ }) : 2 /* ChangeType.Modified */ === e.type && 0 /* ChangeType.Added */ === n.type ? this.pa = this.pa.insert(t, {
15758
15780
  type: 0 /* ChangeType.Added */ ,
15759
15781
  doc: e.doc
15760
- }) : 1 /* ChangeType.Removed */ === e.type && 0 /* ChangeType.Added */ === n.type ? this.ma = this.ma.remove(t) : 1 /* ChangeType.Removed */ === e.type && 2 /* ChangeType.Modified */ === n.type ? this.ma = this.ma.insert(t, {
15782
+ }) : 1 /* ChangeType.Removed */ === e.type && 0 /* ChangeType.Added */ === n.type ? this.pa = this.pa.remove(t) : 1 /* ChangeType.Removed */ === e.type && 2 /* ChangeType.Modified */ === n.type ? this.pa = this.pa.insert(t, {
15761
15783
  type: 1 /* ChangeType.Removed */ ,
15762
15784
  doc: n.doc
15763
- }) : 0 /* ChangeType.Added */ === e.type && 1 /* ChangeType.Removed */ === n.type ? this.ma = this.ma.insert(t, {
15785
+ }) : 0 /* ChangeType.Added */ === e.type && 1 /* ChangeType.Removed */ === n.type ? this.pa = this.pa.insert(t, {
15764
15786
  type: 2 /* ChangeType.Modified */ ,
15765
15787
  doc: e.doc
15766
15788
  }) :
@@ -15773,12 +15795,12 @@ class DelayedOperation {
15773
15795
  // Removed->Metadata
15774
15796
  fail(63341, {
15775
15797
  Vt: e,
15776
- fa: n
15777
- }) : this.ma = this.ma.insert(t, e);
15798
+ ya: n
15799
+ }) : this.pa = this.pa.insert(t, e);
15778
15800
  }
15779
- ga() {
15801
+ wa() {
15780
15802
  const e = [];
15781
- return this.ma.inorderTraversal(((t, n) => {
15803
+ return this.pa.inorderTraversal(((t, n) => {
15782
15804
  e.push(n);
15783
15805
  })), e;
15784
15806
  }
@@ -15834,25 +15856,25 @@ class ViewSnapshot {
15834
15856
  * tracked by EventManager.
15835
15857
  */ class __PRIVATE_QueryListenersInfo {
15836
15858
  constructor() {
15837
- this.pa = void 0, this.ya = [];
15859
+ this.ba = void 0, this.Sa = [];
15838
15860
  }
15839
15861
  // Helper methods that checks if the query has listeners that listening to remote store
15840
- wa() {
15841
- return this.ya.some((e => e.Sa()));
15862
+ Da() {
15863
+ return this.Sa.some((e => e.va()));
15842
15864
  }
15843
15865
  }
15844
15866
 
15845
15867
  class __PRIVATE_EventManagerImpl {
15846
15868
  constructor() {
15847
15869
  this.queries = __PRIVATE_newQueriesObjectMap(), this.onlineState = "Unknown" /* OnlineState.Unknown */ ,
15848
- this.ba = new Set;
15870
+ this.Ca = new Set;
15849
15871
  }
15850
15872
  terminate() {
15851
15873
  !function __PRIVATE_errorAllTargets(e, t) {
15852
15874
  const n = __PRIVATE_debugCast(e), r = n.queries;
15853
15875
  // Prevent further access by clearing ObjectMap.
15854
15876
  n.queries = __PRIVATE_newQueriesObjectMap(), r.forEach(((e, n) => {
15855
- for (const e of n.ya) e.onError(t);
15877
+ for (const e of n.Sa) e.onError(t);
15856
15878
  }));
15857
15879
  }
15858
15880
  // Call all global snapshot listeners that have been set.
@@ -15869,19 +15891,19 @@ async function __PRIVATE_eventManagerListen(e, t) {
15869
15891
  let r = 3 /* ListenerSetupAction.NoActionRequired */;
15870
15892
  const i = t.query;
15871
15893
  let s = n.queries.get(i);
15872
- s ? !s.wa() && t.Sa() && (
15894
+ s ? !s.Da() && t.va() && (
15873
15895
  // Query has been listening to local cache, and tries to add a new listener sourced from watch.
15874
15896
  r = 2 /* ListenerSetupAction.RequireWatchConnectionOnly */) : (s = new __PRIVATE_QueryListenersInfo,
15875
- r = t.Sa() ? 0 /* ListenerSetupAction.InitializeLocalListenAndRequireWatchConnection */ : 1 /* ListenerSetupAction.InitializeLocalListenOnly */);
15897
+ r = t.va() ? 0 /* ListenerSetupAction.InitializeLocalListenAndRequireWatchConnection */ : 1 /* ListenerSetupAction.InitializeLocalListenOnly */);
15876
15898
  try {
15877
15899
  switch (r) {
15878
15900
  case 0 /* ListenerSetupAction.InitializeLocalListenAndRequireWatchConnection */ :
15879
- s.pa = await n.onListen(i,
15901
+ s.ba = await n.onListen(i,
15880
15902
  /** enableRemoteListen= */ !0);
15881
15903
  break;
15882
15904
 
15883
15905
  case 1 /* ListenerSetupAction.InitializeLocalListenOnly */ :
15884
- s.pa = await n.onListen(i,
15906
+ s.ba = await n.onListen(i,
15885
15907
  /** enableRemoteListen= */ !1);
15886
15908
  break;
15887
15909
 
@@ -15892,10 +15914,10 @@ async function __PRIVATE_eventManagerListen(e, t) {
15892
15914
  const n = __PRIVATE_wrapInUserErrorIfRecoverable(e, `Initialization of query '${__PRIVATE_stringifyQuery(t.query)}' failed`);
15893
15915
  return void t.onError(n);
15894
15916
  }
15895
- if (n.queries.set(i, s), s.ya.push(t),
15917
+ if (n.queries.set(i, s), s.Sa.push(t),
15896
15918
  // Run global snapshot listeners if a consistent snapshot has been emitted.
15897
- t.Da(n.onlineState), s.pa) {
15898
- t.va(s.pa) && __PRIVATE_raiseSnapshotsInSyncEvent(n);
15919
+ t.Fa(n.onlineState), s.ba) {
15920
+ t.Ma(s.ba) && __PRIVATE_raiseSnapshotsInSyncEvent(n);
15899
15921
  }
15900
15922
  }
15901
15923
 
@@ -15904,8 +15926,8 @@ async function __PRIVATE_eventManagerUnlisten(e, t) {
15904
15926
  let i = 3 /* ListenerRemovalAction.NoActionRequired */;
15905
15927
  const s = n.queries.get(r);
15906
15928
  if (s) {
15907
- const e = s.ya.indexOf(t);
15908
- e >= 0 && (s.ya.splice(e, 1), 0 === s.ya.length ? i = t.Sa() ? 0 /* ListenerRemovalAction.TerminateLocalListenAndRequireWatchDisconnection */ : 1 /* ListenerRemovalAction.TerminateLocalListenOnly */ : !s.wa() && t.Sa() && (
15929
+ const e = s.Sa.indexOf(t);
15930
+ e >= 0 && (s.Sa.splice(e, 1), 0 === s.Sa.length ? i = t.va() ? 0 /* ListenerRemovalAction.TerminateLocalListenAndRequireWatchDisconnection */ : 1 /* ListenerRemovalAction.TerminateLocalListenOnly */ : !s.Da() && t.va() && (
15909
15931
  // The removed listener is the last one that sourced from watch.
15910
15932
  i = 2 /* ListenerRemovalAction.RequireWatchDisconnectionOnly */));
15911
15933
  }
@@ -15932,8 +15954,8 @@ function __PRIVATE_eventManagerOnWatchChange(e, t) {
15932
15954
  for (const e of t) {
15933
15955
  const t = e.query, i = n.queries.get(t);
15934
15956
  if (i) {
15935
- for (const t of i.ya) t.va(e) && (r = !0);
15936
- i.pa = e;
15957
+ for (const t of i.Sa) t.Ma(e) && (r = !0);
15958
+ i.ba = e;
15937
15959
  }
15938
15960
  }
15939
15961
  r && __PRIVATE_raiseSnapshotsInSyncEvent(n);
@@ -15941,14 +15963,14 @@ function __PRIVATE_eventManagerOnWatchChange(e, t) {
15941
15963
 
15942
15964
  function __PRIVATE_eventManagerOnWatchError(e, t, n) {
15943
15965
  const r = __PRIVATE_debugCast(e), i = r.queries.get(t);
15944
- if (i) for (const e of i.ya) e.onError(n);
15966
+ if (i) for (const e of i.Sa) e.onError(n);
15945
15967
  // Remove all listeners. NOTE: We don't need to call syncEngine.unlisten()
15946
15968
  // after an error.
15947
15969
  r.queries.delete(t);
15948
15970
  }
15949
15971
 
15950
15972
  function __PRIVATE_raiseSnapshotsInSyncEvent(e) {
15951
- e.ba.forEach((e => {
15973
+ e.Ca.forEach((e => {
15952
15974
  e.next();
15953
15975
  }));
15954
15976
  }
@@ -15956,7 +15978,7 @@ function __PRIVATE_raiseSnapshotsInSyncEvent(e) {
15956
15978
  var en, tn;
15957
15979
 
15958
15980
  /** Listen to both cache and server changes */
15959
- (tn = en || (en = {})).Ca = "default",
15981
+ (tn = en || (en = {})).xa = "default",
15960
15982
  /** Listen to changes in cache only */
15961
15983
  tn.Cache = "cache";
15962
15984
 
@@ -15968,12 +15990,12 @@ tn.Cache = "cache";
15968
15990
  */
15969
15991
  class __PRIVATE_QueryListener {
15970
15992
  constructor(e, t, n) {
15971
- this.query = e, this.Fa = t,
15993
+ this.query = e, this.Oa = t,
15972
15994
  /**
15973
15995
  * Initial snapshots (e.g. from cache) may not be propagated to the wrapped
15974
15996
  * observer. This flag is set to true once we've actually raised an event.
15975
15997
  */
15976
- this.Ma = !1, this.xa = null, this.onlineState = "Unknown" /* OnlineState.Unknown */ ,
15998
+ this.Na = !1, this.Ba = null, this.onlineState = "Unknown" /* OnlineState.Unknown */ ,
15977
15999
  this.options = n || {};
15978
16000
  }
15979
16001
  /**
@@ -15981,7 +16003,7 @@ class __PRIVATE_QueryListener {
15981
16003
  * if applicable (depending on what changed, whether the user has opted into
15982
16004
  * metadata-only changes, etc.). Returns true if a user-facing event was
15983
16005
  * indeed raised.
15984
- */ va(e) {
16006
+ */ Ma(e) {
15985
16007
  if (!this.options.includeMetadataChanges) {
15986
16008
  // Remove the metadata only changes.
15987
16009
  const t = [];
@@ -15990,49 +16012,49 @@ class __PRIVATE_QueryListener {
15990
16012
  /* excludesMetadataChanges= */ !0, e.hasCachedResults);
15991
16013
  }
15992
16014
  let t = !1;
15993
- return this.Ma ? this.Oa(e) && (this.Fa.next(e), t = !0) : this.Na(e, this.onlineState) && (this.Ba(e),
15994
- t = !0), this.xa = e, t;
16015
+ return this.Na ? this.La(e) && (this.Oa.next(e), t = !0) : this.ka(e, this.onlineState) && (this.qa(e),
16016
+ t = !0), this.Ba = e, t;
15995
16017
  }
15996
16018
  onError(e) {
15997
- this.Fa.error(e);
16019
+ this.Oa.error(e);
15998
16020
  }
15999
- /** Returns whether a snapshot was raised. */ Da(e) {
16021
+ /** Returns whether a snapshot was raised. */ Fa(e) {
16000
16022
  this.onlineState = e;
16001
16023
  let t = !1;
16002
- return this.xa && !this.Ma && this.Na(this.xa, e) && (this.Ba(this.xa), t = !0),
16024
+ return this.Ba && !this.Na && this.ka(this.Ba, e) && (this.qa(this.Ba), t = !0),
16003
16025
  t;
16004
16026
  }
16005
- Na(e, t) {
16027
+ ka(e, t) {
16006
16028
  // Always raise the first event when we're synced
16007
16029
  if (!e.fromCache) return !0;
16008
16030
  // Always raise event if listening to cache
16009
- if (!this.Sa()) return !0;
16031
+ if (!this.va()) return !0;
16010
16032
  // NOTE: We consider OnlineState.Unknown as online (it should become Offline
16011
16033
  // or Online if we wait long enough).
16012
16034
  const n = "Offline" /* OnlineState.Offline */ !== t;
16013
16035
  // Don't raise the event if we're online, aren't synced yet (checked
16014
16036
  // above) and are waiting for a sync.
16015
- return (!this.options.La || !n) && (!e.docs.isEmpty() || e.hasCachedResults || "Offline" /* OnlineState.Offline */ === t);
16037
+ return (!this.options.Qa || !n) && (!e.docs.isEmpty() || e.hasCachedResults || "Offline" /* OnlineState.Offline */ === t);
16016
16038
  // Raise data from cache if we have any documents, have cached results before,
16017
16039
  // or we are offline.
16018
16040
  }
16019
- Oa(e) {
16041
+ La(e) {
16020
16042
  // We don't need to handle includeDocumentMetadataChanges here because
16021
16043
  // the Metadata only changes have already been stripped out if needed.
16022
16044
  // At this point the only changes we will see are the ones we should
16023
16045
  // propagate.
16024
16046
  if (e.docChanges.length > 0) return !0;
16025
- const t = this.xa && this.xa.hasPendingWrites !== e.hasPendingWrites;
16047
+ const t = this.Ba && this.Ba.hasPendingWrites !== e.hasPendingWrites;
16026
16048
  return !(!e.syncStateChanged && !t) && !0 === this.options.includeMetadataChanges;
16027
16049
  // Generally we should have hit one of the cases above, but it's possible
16028
16050
  // to get here if there were only metadata docChanges and they got
16029
16051
  // stripped out.
16030
16052
  }
16031
- Ba(e) {
16053
+ qa(e) {
16032
16054
  e = ViewSnapshot.fromInitialDocuments(e.query, e.docs, e.mutatedKeys, e.fromCache, e.hasCachedResults),
16033
- this.Ma = !0, this.Fa.next(e);
16055
+ this.Na = !0, this.Oa.next(e);
16034
16056
  }
16035
- Sa() {
16057
+ va() {
16036
16058
  return this.options.source !== en.Cache;
16037
16059
  }
16038
16060
  }
@@ -16060,10 +16082,10 @@ class __PRIVATE_QueryListener {
16060
16082
  constructor(e,
16061
16083
  // How many bytes this element takes to store in the bundle.
16062
16084
  t) {
16063
- this.ka = e, this.byteLength = t;
16085
+ this.$a = e, this.byteLength = t;
16064
16086
  }
16065
- qa() {
16066
- return "metadata" in this.ka;
16087
+ Ua() {
16088
+ return "metadata" in this.$a;
16067
16089
  }
16068
16090
  }
16069
16091
 
@@ -16107,7 +16129,7 @@ class __PRIVATE_QueryListener {
16107
16129
  * storage and provide progress update while loading.
16108
16130
  */ class __PRIVATE_BundleLoader {
16109
16131
  constructor(e, t, n) {
16110
- this.Qa = e, this.localStore = t, this.serializer = n,
16132
+ this.Ka = e, this.localStore = t, this.serializer = n,
16111
16133
  /** Batched queries to be saved into storage */
16112
16134
  this.queries = [],
16113
16135
  /** Batched documents to be saved into storage */
@@ -16120,21 +16142,21 @@ class __PRIVATE_QueryListener {
16120
16142
  *
16121
16143
  * Returns a new progress if adding the element leads to a new progress,
16122
16144
  * otherwise returns null.
16123
- */ $a(e) {
16145
+ */ Wa(e) {
16124
16146
  this.progress.bytesLoaded += e.byteLength;
16125
16147
  let t = this.progress.documentsLoaded;
16126
- if (e.ka.namedQuery) this.queries.push(e.ka.namedQuery); else if (e.ka.documentMetadata) {
16148
+ if (e.$a.namedQuery) this.queries.push(e.$a.namedQuery); else if (e.$a.documentMetadata) {
16127
16149
  this.documents.push({
16128
- metadata: e.ka.documentMetadata
16129
- }), e.ka.documentMetadata.exists || ++t;
16130
- const n = ResourcePath.fromString(e.ka.documentMetadata.name);
16150
+ metadata: e.$a.documentMetadata
16151
+ }), e.$a.documentMetadata.exists || ++t;
16152
+ const n = ResourcePath.fromString(e.$a.documentMetadata.name);
16131
16153
  this.collectionGroups.add(n.get(n.length - 2));
16132
- } else e.ka.document && (this.documents[this.documents.length - 1].document = e.ka.document,
16154
+ } else e.$a.document && (this.documents[this.documents.length - 1].document = e.$a.document,
16133
16155
  ++t);
16134
16156
  return t !== this.progress.documentsLoaded ? (this.progress.documentsLoaded = t,
16135
16157
  Object.assign({}, this.progress)) : null;
16136
16158
  }
16137
- Ua(e) {
16159
+ Ga(e) {
16138
16160
  const t = new Map, n = new __PRIVATE_BundleConverterImpl(this.serializer);
16139
16161
  for (const r of e) if (r.metadata.queries) {
16140
16162
  const e = n.Us(r.metadata.name);
@@ -16148,12 +16170,12 @@ class __PRIVATE_QueryListener {
16148
16170
  /**
16149
16171
  * Update the progress to 'Success' and return the updated progress.
16150
16172
  */ async complete() {
16151
- const e = await __PRIVATE_localStoreApplyBundledDocuments(this.localStore, new __PRIVATE_BundleConverterImpl(this.serializer), this.documents, this.Qa.id), t = this.Ua(this.documents);
16173
+ const e = await __PRIVATE_localStoreApplyBundledDocuments(this.localStore, new __PRIVATE_BundleConverterImpl(this.serializer), this.documents, this.Ka.id), t = this.Ga(this.documents);
16152
16174
  for (const e of this.queries) await __PRIVATE_localStoreSaveNamedQuery(this.localStore, e, t.get(e.name));
16153
16175
  return this.progress.taskState = "Success", {
16154
16176
  progress: this.progress,
16155
- Ka: this.collectionGroups,
16156
- Wa: e
16177
+ za: this.collectionGroups,
16178
+ ja: e
16157
16179
  };
16158
16180
  }
16159
16181
  }
@@ -16211,7 +16233,7 @@ class __PRIVATE_RemovedLimboDocument {
16211
16233
  constructor(e,
16212
16234
  /** Documents included in the remote target */
16213
16235
  t) {
16214
- this.query = e, this.Ga = t, this.za = null, this.hasCachedResults = !1,
16236
+ this.query = e, this.Ha = t, this.Ja = null, this.hasCachedResults = !1,
16215
16237
  /**
16216
16238
  * A flag whether the view is current with the backend. A view is considered
16217
16239
  * current after it has seen the current flag from the backend and did not
@@ -16220,16 +16242,16 @@ class __PRIVATE_RemovedLimboDocument {
16220
16242
  */
16221
16243
  this.current = !1,
16222
16244
  /** Documents in the view but not in the remote target */
16223
- this.ja = __PRIVATE_documentKeySet(),
16245
+ this.Ya = __PRIVATE_documentKeySet(),
16224
16246
  /** Document Keys that have local changes */
16225
- this.mutatedKeys = __PRIVATE_documentKeySet(), this.Ha = __PRIVATE_newQueryComparator(e),
16226
- this.Ja = new DocumentSet(this.Ha);
16247
+ this.mutatedKeys = __PRIVATE_documentKeySet(), this.Za = __PRIVATE_newQueryComparator(e),
16248
+ this.Xa = new DocumentSet(this.Za);
16227
16249
  }
16228
16250
  /**
16229
16251
  * The set of remote documents that the server has told us belongs to the target associated with
16230
16252
  * this view.
16231
- */ get Ya() {
16232
- return this.Ga;
16253
+ */ get eu() {
16254
+ return this.Ha;
16233
16255
  }
16234
16256
  /**
16235
16257
  * Iterates over a set of doc changes, applies the query limit, and computes
@@ -16240,8 +16262,8 @@ class __PRIVATE_RemovedLimboDocument {
16240
16262
  * @param previousChanges - If this is being called with a refill, then start
16241
16263
  * with this set of docs and changes instead of the current view.
16242
16264
  * @returns a new set of docs, changes, and refill flag.
16243
- */ Za(e, t) {
16244
- const n = t ? t.Xa : new __PRIVATE_DocumentChangeSet, r = t ? t.Ja : this.Ja;
16265
+ */ tu(e, t) {
16266
+ const n = t ? t.nu : new __PRIVATE_DocumentChangeSet, r = t ? t.Xa : this.Xa;
16245
16267
  let i = t ? t.mutatedKeys : this.mutatedKeys, s = r, o = !1;
16246
16268
  // Track the last doc in a (full) limit. This is necessary, because some
16247
16269
  // update (a delete, or an update moving a doc past the old limit) might
@@ -16264,10 +16286,10 @@ class __PRIVATE_RemovedLimboDocument {
16264
16286
  u.data.isEqual(c.data) ? l !== h && (n.track({
16265
16287
  type: 3 /* ChangeType.Metadata */ ,
16266
16288
  doc: c
16267
- }), P = !0) : this.eu(u, c) || (n.track({
16289
+ }), P = !0) : this.ru(u, c) || (n.track({
16268
16290
  type: 2 /* ChangeType.Modified */ ,
16269
16291
  doc: c
16270
- }), P = !0, (_ && this.Ha(c, _) > 0 || a && this.Ha(c, a) < 0) && (
16292
+ }), P = !0, (_ && this.Za(c, _) > 0 || a && this.Za(c, a) < 0) && (
16271
16293
  // This doc moved from inside the limit to outside the limit.
16272
16294
  // That means there may be some other doc in the local cache
16273
16295
  // that should be included instead.
@@ -16292,13 +16314,13 @@ class __PRIVATE_RemovedLimboDocument {
16292
16314
  });
16293
16315
  }
16294
16316
  return {
16295
- Ja: s,
16296
- Xa: n,
16317
+ Xa: s,
16318
+ nu: n,
16297
16319
  Cs: o,
16298
16320
  mutatedKeys: i
16299
16321
  };
16300
16322
  }
16301
- eu(e, t) {
16323
+ ru(e, t) {
16302
16324
  // We suppress the initial change event for documents that were modified as
16303
16325
  // part of a write acknowledgment (e.g. when the value of a server transform
16304
16326
  // is applied) as Watch will send us the same document again.
@@ -16323,10 +16345,10 @@ class __PRIVATE_RemovedLimboDocument {
16323
16345
  */
16324
16346
  // PORTING NOTE: The iOS/Android clients always compute limbo document changes.
16325
16347
  applyChanges(e, t, n, r) {
16326
- const i = this.Ja;
16327
- this.Ja = e.Ja, this.mutatedKeys = e.mutatedKeys;
16348
+ const i = this.Xa;
16349
+ this.Xa = e.Xa, this.mutatedKeys = e.mutatedKeys;
16328
16350
  // Sort changes based on type and query comparator
16329
- const s = e.Xa.ga();
16351
+ const s = e.nu.wa();
16330
16352
  s.sort(((e, t) => function __PRIVATE_compareChangeType(e, t) {
16331
16353
  const order = e => {
16332
16354
  switch (e) {
@@ -16366,70 +16388,70 @@ class __PRIVATE_RemovedLimboDocument {
16366
16388
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16367
16389
  * See the License for the specific language governing permissions and
16368
16390
  * limitations under the License.
16369
- */ (e.type, t.type) || this.Ha(e.doc, t.doc))), this.tu(n), r = null != r && r;
16370
- const o = t && !r ? this.nu() : [], _ = 0 === this.ja.size && this.current && !r ? 1 /* SyncState.Synced */ : 0 /* SyncState.Local */ , a = _ !== this.za;
16391
+ */ (e.type, t.type) || this.Za(e.doc, t.doc))), this.iu(n), r = null != r && r;
16392
+ const o = t && !r ? this.su() : [], _ = 0 === this.Ya.size && this.current && !r ? 1 /* SyncState.Synced */ : 0 /* SyncState.Local */ , a = _ !== this.Ja;
16371
16393
  // We are at synced state if there is no limbo docs are waiting to be resolved, view is current
16372
16394
  // with the backend, and the query is not pending to reset due to existence filter mismatch.
16373
- if (this.za = _, 0 !== s.length || a) {
16395
+ if (this.Ja = _, 0 !== s.length || a) {
16374
16396
  return {
16375
- snapshot: new ViewSnapshot(this.query, e.Ja, i, s, e.mutatedKeys, 0 /* SyncState.Local */ === _, a,
16397
+ snapshot: new ViewSnapshot(this.query, e.Xa, i, s, e.mutatedKeys, 0 /* SyncState.Local */ === _, a,
16376
16398
  /* excludesMetadataChanges= */ !1, !!n && n.resumeToken.approximateByteSize() > 0),
16377
- ru: o
16399
+ ou: o
16378
16400
  };
16379
16401
  }
16380
16402
  // no changes
16381
16403
  return {
16382
- ru: o
16404
+ ou: o
16383
16405
  };
16384
16406
  }
16385
16407
  /**
16386
16408
  * Applies an OnlineState change to the view, potentially generating a
16387
16409
  * ViewChange if the view's syncState changes as a result.
16388
- */ Da(e) {
16410
+ */ Fa(e) {
16389
16411
  return this.current && "Offline" /* OnlineState.Offline */ === e ? (
16390
16412
  // If we're offline, set `current` to false and then call applyChanges()
16391
16413
  // to refresh our syncState and generate a ViewChange as appropriate. We
16392
16414
  // are guaranteed to get a new TargetChange that sets `current` back to
16393
16415
  // true once the client is back online.
16394
16416
  this.current = !1, this.applyChanges({
16395
- Ja: this.Ja,
16396
- Xa: new __PRIVATE_DocumentChangeSet,
16417
+ Xa: this.Xa,
16418
+ nu: new __PRIVATE_DocumentChangeSet,
16397
16419
  mutatedKeys: this.mutatedKeys,
16398
16420
  Cs: !1
16399
16421
  },
16400
16422
  /* limboResolutionEnabled= */ !1)) : {
16401
- ru: []
16423
+ ou: []
16402
16424
  };
16403
16425
  }
16404
16426
  /**
16405
16427
  * Returns whether the doc for the given key should be in limbo.
16406
- */ iu(e) {
16428
+ */ _u(e) {
16407
16429
  // If the remote end says it's part of this query, it's not in limbo.
16408
- return !this.Ga.has(e) && (
16430
+ return !this.Ha.has(e) && (
16409
16431
  // The local store doesn't think it's a result, so it shouldn't be in limbo.
16410
- !!this.Ja.has(e) && !this.Ja.get(e).hasLocalMutations);
16432
+ !!this.Xa.has(e) && !this.Xa.get(e).hasLocalMutations);
16411
16433
  }
16412
16434
  /**
16413
16435
  * Updates syncedDocuments, current, and limbo docs based on the given change.
16414
16436
  * Returns the list of changes to which docs are in limbo.
16415
- */ tu(e) {
16416
- e && (e.addedDocuments.forEach((e => this.Ga = this.Ga.add(e))), e.modifiedDocuments.forEach((e => {})),
16417
- e.removedDocuments.forEach((e => this.Ga = this.Ga.delete(e))), this.current = e.current);
16437
+ */ iu(e) {
16438
+ e && (e.addedDocuments.forEach((e => this.Ha = this.Ha.add(e))), e.modifiedDocuments.forEach((e => {})),
16439
+ e.removedDocuments.forEach((e => this.Ha = this.Ha.delete(e))), this.current = e.current);
16418
16440
  }
16419
- nu() {
16441
+ su() {
16420
16442
  // We can only determine limbo documents when we're in-sync with the server.
16421
16443
  if (!this.current) return [];
16422
16444
  // TODO(klimt): Do this incrementally so that it's not quadratic when
16423
16445
  // updating many documents.
16424
- const e = this.ja;
16425
- this.ja = __PRIVATE_documentKeySet(), this.Ja.forEach((e => {
16426
- this.iu(e.key) && (this.ja = this.ja.add(e.key));
16446
+ const e = this.Ya;
16447
+ this.Ya = __PRIVATE_documentKeySet(), this.Xa.forEach((e => {
16448
+ this._u(e.key) && (this.Ya = this.Ya.add(e.key));
16427
16449
  }));
16428
16450
  // Diff the new limbo docs with the old limbo docs.
16429
16451
  const t = [];
16430
16452
  return e.forEach((e => {
16431
- this.ja.has(e) || t.push(new __PRIVATE_RemovedLimboDocument(e));
16432
- })), this.ja.forEach((n => {
16453
+ this.Ya.has(e) || t.push(new __PRIVATE_RemovedLimboDocument(e));
16454
+ })), this.Ya.forEach((n => {
16433
16455
  e.has(n) || t.push(new __PRIVATE_AddedLimboDocument(n));
16434
16456
  })), t;
16435
16457
  }
@@ -16453,9 +16475,9 @@ class __PRIVATE_RemovedLimboDocument {
16453
16475
  * @returns The ViewChange that resulted from this synchronization.
16454
16476
  */
16455
16477
  // PORTING NOTE: Multi-tab only.
16456
- su(e) {
16457
- this.Ga = e.$s, this.ja = __PRIVATE_documentKeySet();
16458
- const t = this.Za(e.documents);
16478
+ au(e) {
16479
+ this.Ha = e.$s, this.Ya = __PRIVATE_documentKeySet();
16480
+ const t = this.tu(e.documents);
16459
16481
  return this.applyChanges(t, /* limboResolutionEnabled= */ !0);
16460
16482
  }
16461
16483
  /**
@@ -16464,8 +16486,8 @@ class __PRIVATE_RemovedLimboDocument {
16464
16486
  * `hasPendingWrites` status of the already established view.
16465
16487
  */
16466
16488
  // PORTING NOTE: Multi-tab only.
16467
- ou() {
16468
- return ViewSnapshot.fromInitialDocuments(this.query, this.Ja, this.mutatedKeys, 0 /* SyncState.Local */ === this.za, this.hasCachedResults);
16489
+ uu() {
16490
+ return ViewSnapshot.fromInitialDocuments(this.query, this.Xa, this.mutatedKeys, 0 /* SyncState.Local */ === this.Ja, this.hasCachedResults);
16469
16491
  }
16470
16492
  }
16471
16493
 
@@ -16505,7 +16527,7 @@ const nn = "SyncEngine";
16505
16527
  * decide whether it needs to manufacture a delete event for the target once
16506
16528
  * the target is CURRENT.
16507
16529
  */
16508
- this._u = !1;
16530
+ this.cu = !1;
16509
16531
  }
16510
16532
  }
16511
16533
 
@@ -16526,8 +16548,8 @@ const nn = "SyncEngine";
16526
16548
  // PORTING NOTE: Manages state synchronization in multi-tab environments.
16527
16549
  r, i, s) {
16528
16550
  this.localStore = e, this.remoteStore = t, this.eventManager = n, this.sharedClientState = r,
16529
- this.currentUser = i, this.maxConcurrentLimboResolutions = s, this.au = {}, this.uu = new ObjectMap((e => __PRIVATE_canonifyQuery(e)), __PRIVATE_queryEquals),
16530
- this.cu = new Map,
16551
+ this.currentUser = i, this.maxConcurrentLimboResolutions = s, this.lu = {}, this.hu = new ObjectMap((e => __PRIVATE_canonifyQuery(e)), __PRIVATE_queryEquals),
16552
+ this.Pu = new Map,
16531
16553
  /**
16532
16554
  * The keys of documents that are in limbo for which we haven't yet started a
16533
16555
  * limbo resolution query. The strings in this set are the result of calling
@@ -16537,28 +16559,28 @@ const nn = "SyncEngine";
16537
16559
  * of arbitrary elements and it also maintains insertion order, providing the
16538
16560
  * desired queue-like FIFO semantics.
16539
16561
  */
16540
- this.lu = new Set,
16562
+ this.Tu = new Set,
16541
16563
  /**
16542
16564
  * Keeps track of the target ID for each document that is in limbo with an
16543
16565
  * active target.
16544
16566
  */
16545
- this.hu = new SortedMap(DocumentKey.comparator),
16567
+ this.Iu = new SortedMap(DocumentKey.comparator),
16546
16568
  /**
16547
16569
  * Keeps track of the information about an active limbo resolution for each
16548
16570
  * active target ID that was started for the purpose of limbo resolution.
16549
16571
  */
16550
- this.Pu = new Map, this.Tu = new __PRIVATE_ReferenceSet,
16572
+ this.Eu = new Map, this.du = new __PRIVATE_ReferenceSet,
16551
16573
  /** Stores user completion handlers, indexed by User and BatchId. */
16552
- this.Iu = {},
16574
+ this.Au = {},
16553
16575
  /** Stores user callbacks waiting for all pending writes to be acknowledged. */
16554
- this.Eu = new Map, this.du = __PRIVATE_TargetIdGenerator.lr(), this.onlineState = "Unknown" /* OnlineState.Unknown */ ,
16576
+ this.Ru = new Map, this.Vu = __PRIVATE_TargetIdGenerator.lr(), this.onlineState = "Unknown" /* OnlineState.Unknown */ ,
16555
16577
  // The primary state is set to `true` or `false` immediately after Firestore
16556
16578
  // startup. In the interim, a client should only be considered primary if
16557
16579
  // `isPrimary` is true.
16558
- this.Au = void 0;
16580
+ this.mu = void 0;
16559
16581
  }
16560
16582
  get isPrimaryClient() {
16561
- return !0 === this.Au;
16583
+ return !0 === this.mu;
16562
16584
  }
16563
16585
  }
16564
16586
 
@@ -16570,7 +16592,7 @@ const nn = "SyncEngine";
16570
16592
  async function __PRIVATE_syncEngineListen(e, t, n = !0) {
16571
16593
  const r = __PRIVATE_ensureWatchCallbacks(e);
16572
16594
  let i;
16573
- const s = r.uu.get(t);
16595
+ const s = r.hu.get(t);
16574
16596
  return s ? (
16575
16597
  // PORTING NOTE: With Multi-Tab Web, it is possible that a query view
16576
16598
  // already exists when EventManager calls us for the first time. This
@@ -16578,7 +16600,7 @@ async function __PRIVATE_syncEngineListen(e, t, n = !0) {
16578
16600
  // behalf of another tab and the user of the primary also starts listening
16579
16601
  // to the query. EventManager will not have an assigned target ID in this
16580
16602
  // case and calls `listen` to obtain this ID.
16581
- r.sharedClientState.addLocalQueryTarget(s.targetId), i = s.view.ou()) : i = await __PRIVATE_allocateTargetAndMaybeListen(r, t, n,
16603
+ r.sharedClientState.addLocalQueryTarget(s.targetId), i = s.view.uu()) : i = await __PRIVATE_allocateTargetAndMaybeListen(r, t, n,
16582
16604
  /** shouldInitializeView= */ !0), i;
16583
16605
  }
16584
16606
 
@@ -16603,30 +16625,30 @@ async function __PRIVATE_allocateTargetAndMaybeListen(e, t, n, r) {
16603
16625
  // PORTING NOTE: On Web only, we inject the code that registers new Limbo
16604
16626
  // targets based on view changes. This allows us to only depend on Limbo
16605
16627
  // changes when user code includes queries.
16606
- e.Ru = (t, n, r) => async function __PRIVATE_applyDocChanges(e, t, n, r) {
16607
- let i = t.view.Za(n);
16628
+ e.fu = (t, n, r) => async function __PRIVATE_applyDocChanges(e, t, n, r) {
16629
+ let i = t.view.tu(n);
16608
16630
  i.Cs && (
16609
16631
  // The query has a limit and some docs were removed, so we need
16610
16632
  // to re-run the query against the local store to make sure we
16611
16633
  // didn't lose any good docs that had been past the limit.
16612
16634
  i = await __PRIVATE_localStoreExecuteQuery(e.localStore, t.query,
16613
- /* usePreviousResults= */ !1).then((({documents: e}) => t.view.Za(e, i))));
16635
+ /* usePreviousResults= */ !1).then((({documents: e}) => t.view.tu(e, i))));
16614
16636
  const s = r && r.targetChanges.get(t.targetId), o = r && null != r.targetMismatches.get(t.targetId), _ = t.view.applyChanges(i,
16615
16637
  /* limboResolutionEnabled= */ e.isPrimaryClient, s, o);
16616
- return __PRIVATE_updateTrackedLimbos(e, t.targetId, _.ru), _.snapshot;
16638
+ return __PRIVATE_updateTrackedLimbos(e, t.targetId, _.ou), _.snapshot;
16617
16639
  }(e, t, n, r);
16618
16640
  const s = await __PRIVATE_localStoreExecuteQuery(e.localStore, t,
16619
- /* usePreviousResults= */ !0), o = new __PRIVATE_View(t, s.$s), _ = o.Za(s.documents), a = TargetChange.createSynthesizedTargetChangeForCurrentChange(n, r && "Offline" /* OnlineState.Offline */ !== e.onlineState, i), u = o.applyChanges(_,
16641
+ /* usePreviousResults= */ !0), o = new __PRIVATE_View(t, s.$s), _ = o.tu(s.documents), a = TargetChange.createSynthesizedTargetChangeForCurrentChange(n, r && "Offline" /* OnlineState.Offline */ !== e.onlineState, i), u = o.applyChanges(_,
16620
16642
  /* limboResolutionEnabled= */ e.isPrimaryClient, a);
16621
- __PRIVATE_updateTrackedLimbos(e, n, u.ru);
16643
+ __PRIVATE_updateTrackedLimbos(e, n, u.ou);
16622
16644
  const c = new __PRIVATE_QueryView(t, n, o);
16623
- return e.uu.set(t, c), e.cu.has(n) ? e.cu.get(n).push(t) : e.cu.set(n, [ t ]), u.snapshot;
16645
+ return e.hu.set(t, c), e.Pu.has(n) ? e.Pu.get(n).push(t) : e.Pu.set(n, [ t ]), u.snapshot;
16624
16646
  }
16625
16647
 
16626
16648
  /** Stops listening to the query. */ async function __PRIVATE_syncEngineUnlisten(e, t, n) {
16627
- const r = __PRIVATE_debugCast(e), i = r.uu.get(t), s = r.cu.get(i.targetId);
16628
- if (s.length > 1) return r.cu.set(i.targetId, s.filter((e => !__PRIVATE_queryEquals(e, t)))),
16629
- void r.uu.delete(t);
16649
+ const r = __PRIVATE_debugCast(e), i = r.hu.get(t), s = r.Pu.get(i.targetId);
16650
+ if (s.length > 1) return r.Pu.set(i.targetId, s.filter((e => !__PRIVATE_queryEquals(e, t)))),
16651
+ void r.hu.delete(t);
16630
16652
  // No other queries are mapped to the target, clean up the query and the target.
16631
16653
  if (r.isPrimaryClient) {
16632
16654
  // We need to remove the local query target first to allow us to verify
@@ -16642,7 +16664,7 @@ async function __PRIVATE_allocateTargetAndMaybeListen(e, t, n, r) {
16642
16664
  }
16643
16665
 
16644
16666
  /** Unlistens to the remote store while still listening to the cache. */ async function __PRIVATE_triggerRemoteStoreUnlisten(e, t) {
16645
- const n = __PRIVATE_debugCast(e), r = n.uu.get(t), i = n.cu.get(r.targetId);
16667
+ const n = __PRIVATE_debugCast(e), r = n.hu.get(t), i = n.Pu.get(r.targetId);
16646
16668
  n.isPrimaryClient && 1 === i.length && (
16647
16669
  // PORTING NOTE: Unregister the target ID with local Firestore client as
16648
16670
  // watch target.
@@ -16704,9 +16726,9 @@ async function __PRIVATE_allocateTargetAndMaybeListen(e, t, n, r) {
16704
16726
  })));
16705
16727
  }(r.localStore, t);
16706
16728
  r.sharedClientState.addPendingMutation(e.batchId), function __PRIVATE_addMutationCallback(e, t, n) {
16707
- let r = e.Iu[e.currentUser.toKey()];
16729
+ let r = e.Au[e.currentUser.toKey()];
16708
16730
  r || (r = new SortedMap(__PRIVATE_primitiveComparator));
16709
- r = r.insert(t, n), e.Iu[e.currentUser.toKey()] = r;
16731
+ r = r.insert(t, n), e.Au[e.currentUser.toKey()] = r;
16710
16732
  }
16711
16733
  /**
16712
16734
  * Resolves or rejects the user callback for the given batch and then discards
@@ -16731,13 +16753,13 @@ async function __PRIVATE_allocateTargetAndMaybeListen(e, t, n, r) {
16731
16753
  const e = await __PRIVATE_localStoreApplyRemoteEventToLocalCache(n.localStore, t);
16732
16754
  // Update `receivedDocument` as appropriate for any limbo targets.
16733
16755
  t.targetChanges.forEach(((e, t) => {
16734
- const r = n.Pu.get(t);
16756
+ const r = n.Eu.get(t);
16735
16757
  r && (
16736
16758
  // Since this is a limbo resolution lookup, it's for a single document
16737
16759
  // and it could be added, modified, or removed, but not a combination.
16738
16760
  __PRIVATE_hardAssert(e.addedDocuments.size + e.modifiedDocuments.size + e.removedDocuments.size <= 1, 22616),
16739
- e.addedDocuments.size > 0 ? r._u = !0 : e.modifiedDocuments.size > 0 ? __PRIVATE_hardAssert(r._u, 14607) : e.removedDocuments.size > 0 && (__PRIVATE_hardAssert(r._u, 42227),
16740
- r._u = !1));
16761
+ e.addedDocuments.size > 0 ? r.cu = !0 : e.modifiedDocuments.size > 0 ? __PRIVATE_hardAssert(r.cu, 14607) : e.removedDocuments.size > 0 && (__PRIVATE_hardAssert(r.cu, 42227),
16762
+ r.cu = !1));
16741
16763
  })), await __PRIVATE_syncEngineEmitNewSnapsAndNotifyLocalStore(n, e, t);
16742
16764
  } catch (e) {
16743
16765
  await __PRIVATE_ignoreIfPrimaryLeaseLoss(e);
@@ -16755,19 +16777,19 @@ async function __PRIVATE_allocateTargetAndMaybeListen(e, t, n, r) {
16755
16777
  // SharedClientState.
16756
16778
  if (r.isPrimaryClient && 0 /* OnlineStateSource.RemoteStore */ === n || !r.isPrimaryClient && 1 /* OnlineStateSource.SharedClientState */ === n) {
16757
16779
  const e = [];
16758
- r.uu.forEach(((n, r) => {
16759
- const i = r.view.Da(t);
16780
+ r.hu.forEach(((n, r) => {
16781
+ const i = r.view.Fa(t);
16760
16782
  i.snapshot && e.push(i.snapshot);
16761
16783
  })), function __PRIVATE_eventManagerOnOnlineStateChange(e, t) {
16762
16784
  const n = __PRIVATE_debugCast(e);
16763
16785
  n.onlineState = t;
16764
16786
  let r = !1;
16765
16787
  n.queries.forEach(((e, n) => {
16766
- for (const e of n.ya)
16788
+ for (const e of n.Sa)
16767
16789
  // Run global snapshot listeners if a consistent snapshot has been emitted.
16768
- e.Da(t) && (r = !0);
16790
+ e.Fa(t) && (r = !0);
16769
16791
  })), r && __PRIVATE_raiseSnapshotsInSyncEvent(n);
16770
- }(r.eventManager, t), e.length && r.au.j_(e), r.onlineState = t, r.isPrimaryClient && r.sharedClientState.setOnlineState(t);
16792
+ }(r.eventManager, t), e.length && r.lu.Y_(e), r.onlineState = t, r.isPrimaryClient && r.sharedClientState.setOnlineState(t);
16771
16793
  }
16772
16794
  }
16773
16795
 
@@ -16785,7 +16807,7 @@ async function __PRIVATE_allocateTargetAndMaybeListen(e, t, n, r) {
16785
16807
  const r = __PRIVATE_debugCast(e);
16786
16808
  // PORTING NOTE: Multi-tab only.
16787
16809
  r.sharedClientState.updateQueryState(t, "rejected", n);
16788
- const i = r.Pu.get(t), s = i && i.key;
16810
+ const i = r.Eu.get(t), s = i && i.key;
16789
16811
  if (s) {
16790
16812
  // TODO(klimt): We really only should do the following on permission
16791
16813
  // denied errors, but we don't have the cause code here.
@@ -16807,7 +16829,7 @@ async function __PRIVATE_allocateTargetAndMaybeListen(e, t, n, r) {
16807
16829
  // RemoteEvent. If `applyRemoteEvent()` throws, we want to re-listen to
16808
16830
  // this query when the RemoteStore restarts the Watch stream, which should
16809
16831
  // re-trigger the target failure.
16810
- r.hu = r.hu.remove(s), r.Pu.delete(t), __PRIVATE_pumpEnqueuedLimboResolutions(r);
16832
+ r.Iu = r.Iu.remove(s), r.Eu.delete(t), __PRIVATE_pumpEnqueuedLimboResolutions(r);
16811
16833
  } else await __PRIVATE_localStoreReleaseTarget(r.localStore, t,
16812
16834
  /* keepPersistedTargetData */ !1).then((() => __PRIVATE_removeAndCleanupTarget(r, t, n))).catch(__PRIVATE_ignoreIfPrimaryLeaseLoss);
16813
16835
  }
@@ -16869,8 +16891,8 @@ async function __PRIVATE_syncEngineRejectFailedWrite(e, t, n) {
16869
16891
  if (e === G)
16870
16892
  // Trigger the callback right away if there is no pending writes at the moment.
16871
16893
  return void t.resolve();
16872
- const r = n.Eu.get(e) || [];
16873
- r.push(t), n.Eu.set(e, r);
16894
+ const r = n.Ru.get(e) || [];
16895
+ r.push(t), n.Ru.set(e, r);
16874
16896
  } catch (e) {
16875
16897
  const n = __PRIVATE_wrapInUserErrorIfRecoverable(e, "Initialization of waitForPendingWrites() operation failed");
16876
16898
  t.reject(n);
@@ -16881,28 +16903,28 @@ async function __PRIVATE_syncEngineRejectFailedWrite(e, t, n) {
16881
16903
  * Triggers the callbacks that are waiting for this batch id to get acknowledged by server,
16882
16904
  * if there are any.
16883
16905
  */ function __PRIVATE_triggerPendingWritesCallbacks(e, t) {
16884
- (e.Eu.get(t) || []).forEach((e => {
16906
+ (e.Ru.get(t) || []).forEach((e => {
16885
16907
  e.resolve();
16886
- })), e.Eu.delete(t);
16908
+ })), e.Ru.delete(t);
16887
16909
  }
16888
16910
 
16889
16911
  /** Reject all outstanding callbacks waiting for pending writes to complete. */ function __PRIVATE_processUserCallback(e, t, n) {
16890
16912
  const r = __PRIVATE_debugCast(e);
16891
- let i = r.Iu[r.currentUser.toKey()];
16913
+ let i = r.Au[r.currentUser.toKey()];
16892
16914
  // NOTE: Mutations restored from persistence won't have callbacks, so it's
16893
16915
  // okay for there to be no callback for this ID.
16894
16916
  if (i) {
16895
16917
  const e = i.get(t);
16896
- e && (n ? e.reject(n) : e.resolve(), i = i.remove(t)), r.Iu[r.currentUser.toKey()] = i;
16918
+ e && (n ? e.reject(n) : e.resolve(), i = i.remove(t)), r.Au[r.currentUser.toKey()] = i;
16897
16919
  }
16898
16920
  }
16899
16921
 
16900
16922
  function __PRIVATE_removeAndCleanupTarget(e, t, n = null) {
16901
16923
  e.sharedClientState.removeLocalQueryTarget(t);
16902
- for (const r of e.cu.get(t)) e.uu.delete(r), n && e.au.Vu(r, n);
16903
- if (e.cu.delete(t), e.isPrimaryClient) {
16904
- e.Tu.Hr(t).forEach((t => {
16905
- e.Tu.containsKey(t) ||
16924
+ for (const r of e.Pu.get(t)) e.hu.delete(r), n && e.lu.gu(r, n);
16925
+ if (e.Pu.delete(t), e.isPrimaryClient) {
16926
+ e.du.Hr(t).forEach((t => {
16927
+ e.du.containsKey(t) ||
16906
16928
  // We removed the last reference for this key
16907
16929
  __PRIVATE_removeLimboTarget(e, t);
16908
16930
  }));
@@ -16910,30 +16932,30 @@ function __PRIVATE_removeAndCleanupTarget(e, t, n = null) {
16910
16932
  }
16911
16933
 
16912
16934
  function __PRIVATE_removeLimboTarget(e, t) {
16913
- e.lu.delete(t.path.canonicalString());
16935
+ e.Tu.delete(t.path.canonicalString());
16914
16936
  // It's possible that the target already got removed because the query failed. In that case,
16915
16937
  // the key won't exist in `limboTargetsByKey`. Only do the cleanup if we still have the target.
16916
- const n = e.hu.get(t);
16917
- null !== n && (__PRIVATE_remoteStoreUnlisten(e.remoteStore, n), e.hu = e.hu.remove(t),
16918
- e.Pu.delete(n), __PRIVATE_pumpEnqueuedLimboResolutions(e));
16938
+ const n = e.Iu.get(t);
16939
+ null !== n && (__PRIVATE_remoteStoreUnlisten(e.remoteStore, n), e.Iu = e.Iu.remove(t),
16940
+ e.Eu.delete(n), __PRIVATE_pumpEnqueuedLimboResolutions(e));
16919
16941
  }
16920
16942
 
16921
16943
  function __PRIVATE_updateTrackedLimbos(e, t, n) {
16922
- for (const r of n) if (r instanceof __PRIVATE_AddedLimboDocument) e.Tu.addReference(r.key, t),
16944
+ for (const r of n) if (r instanceof __PRIVATE_AddedLimboDocument) e.du.addReference(r.key, t),
16923
16945
  __PRIVATE_trackLimboChange(e, r); else if (r instanceof __PRIVATE_RemovedLimboDocument) {
16924
- __PRIVATE_logDebug(nn, "Document no longer in limbo: " + r.key), e.Tu.removeReference(r.key, t);
16925
- e.Tu.containsKey(r.key) ||
16946
+ __PRIVATE_logDebug(nn, "Document no longer in limbo: " + r.key), e.du.removeReference(r.key, t);
16947
+ e.du.containsKey(r.key) ||
16926
16948
  // We removed the last reference for this key
16927
16949
  __PRIVATE_removeLimboTarget(e, r.key);
16928
16950
  } else fail(19791, {
16929
- mu: r
16951
+ pu: r
16930
16952
  });
16931
16953
  }
16932
16954
 
16933
16955
  function __PRIVATE_trackLimboChange(e, t) {
16934
16956
  const n = t.key, r = n.path.canonicalString();
16935
- e.hu.get(n) || e.lu.has(r) || (__PRIVATE_logDebug(nn, "New document in limbo: " + n),
16936
- e.lu.add(r), __PRIVATE_pumpEnqueuedLimboResolutions(e));
16957
+ e.Iu.get(n) || e.Tu.has(r) || (__PRIVATE_logDebug(nn, "New document in limbo: " + n),
16958
+ e.Tu.add(r), __PRIVATE_pumpEnqueuedLimboResolutions(e));
16937
16959
  }
16938
16960
 
16939
16961
  /**
@@ -16944,18 +16966,18 @@ function __PRIVATE_trackLimboChange(e, t) {
16944
16966
  * with "resource exhausted" errors which can lead to pathological client
16945
16967
  * behavior as seen in https://github.com/firebase/firebase-js-sdk/issues/2683.
16946
16968
  */ function __PRIVATE_pumpEnqueuedLimboResolutions(e) {
16947
- for (;e.lu.size > 0 && e.hu.size < e.maxConcurrentLimboResolutions; ) {
16948
- const t = e.lu.values().next().value;
16949
- e.lu.delete(t);
16950
- const n = new DocumentKey(ResourcePath.fromString(t)), r = e.du.next();
16951
- e.Pu.set(r, new LimboResolution(n)), e.hu = e.hu.insert(n, r), __PRIVATE_remoteStoreListen(e.remoteStore, new TargetData(__PRIVATE_queryToTarget(__PRIVATE_newQueryForPath(n.path)), r, "TargetPurposeLimboResolution" /* TargetPurpose.LimboResolution */ , __PRIVATE_ListenSequence.le));
16969
+ for (;e.Tu.size > 0 && e.Iu.size < e.maxConcurrentLimboResolutions; ) {
16970
+ const t = e.Tu.values().next().value;
16971
+ e.Tu.delete(t);
16972
+ const n = new DocumentKey(ResourcePath.fromString(t)), r = e.Vu.next();
16973
+ e.Eu.set(r, new LimboResolution(n)), e.Iu = e.Iu.insert(n, r), __PRIVATE_remoteStoreListen(e.remoteStore, new TargetData(__PRIVATE_queryToTarget(__PRIVATE_newQueryForPath(n.path)), r, "TargetPurposeLimboResolution" /* TargetPurpose.LimboResolution */ , __PRIVATE_ListenSequence.le));
16952
16974
  }
16953
16975
  }
16954
16976
 
16955
16977
  async function __PRIVATE_syncEngineEmitNewSnapsAndNotifyLocalStore(e, t, n) {
16956
16978
  const r = __PRIVATE_debugCast(e), i = [], s = [], o = [];
16957
- r.uu.isEmpty() || (r.uu.forEach(((e, _) => {
16958
- o.push(r.Ru(_, t, n).then((e => {
16979
+ r.hu.isEmpty() || (r.hu.forEach(((e, _) => {
16980
+ o.push(r.fu(_, t, n).then((e => {
16959
16981
  var t;
16960
16982
  // If there are changes, or we are handling a global snapshot, notify
16961
16983
  // secondary clients to update query state.
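The renames in the hunk above leave the limbo-resolution bookkeeping intact: enqueued limbo keys are only promoted to active resolutions while the number of active resolutions stays below maxConcurrentLimboResolutions, which is what avoids the "resource exhausted" behavior described in the comment. A minimal sketch of that bounded pump, using hypothetical names in place of the minified fields:

// Sketch only: the bounded "pump" pattern used for limbo resolutions.
// `startResolution` stands in for starting a watch target for one key.
class LimboPump {
  constructor(maxConcurrent, startResolution) {
    this.maxConcurrent = maxConcurrent;
    this.startResolution = startResolution;
    this.enqueued = [];           // keys waiting for a resolution slot
    this.activeByKey = new Map(); // key -> target id of the active resolution
    this.nextTargetId = 1;
  }
  enqueue(key) {
    if (this.enqueued.includes(key) || this.activeByKey.has(key)) return;
    this.enqueued.push(key);
    this.pump();
  }
  finish(key) {
    this.activeByKey.delete(key); // a slot freed up
    this.pump();
  }
  pump() {
    while (this.enqueued.length > 0 && this.activeByKey.size < this.maxConcurrent) {
      const key = this.enqueued.shift();
      const targetId = this.nextTargetId++;
      this.activeByKey.set(key, targetId);
      this.startResolution(key, targetId);
    }
  }
}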
@@ -16973,7 +16995,7 @@ async function __PRIVATE_syncEngineEmitNewSnapsAndNotifyLocalStore(e, t, n) {
16973
16995
  s.push(t);
16974
16996
  }
16975
16997
  })));
16976
- })), await Promise.all(o), r.au.j_(i), await async function __PRIVATE_localStoreNotifyLocalViewChanges(e, t) {
16998
+ })), await Promise.all(o), r.lu.Y_(i), await async function __PRIVATE_localStoreNotifyLocalViewChanges(e, t) {
16977
16999
  const n = __PRIVATE_debugCast(e);
16978
17000
  try {
16979
17001
  await n.persistence.runTransaction("notifyLocalViewChanges", "readwrite", (e => PersistencePromise.forEach(t, (t => PersistencePromise.forEach(t.ds, (r => n.persistence.referenceDelegate.addReference(e, t.targetId, r))).next((() => PersistencePromise.forEach(t.As, (r => n.persistence.referenceDelegate.removeReference(e, t.targetId, r)))))))));
@@ -17004,11 +17026,11 @@ async function __PRIVATE_syncEngineHandleCredentialChange(e, t) {
17004
17026
  n.currentUser = t,
17005
17027
  // Fails tasks waiting for pending writes requested by previous user.
17006
17028
  function __PRIVATE_rejectOutstandingPendingWritesCallbacks(e, t) {
17007
- e.Eu.forEach((e => {
17029
+ e.Ru.forEach((e => {
17008
17030
  e.forEach((e => {
17009
17031
  e.reject(new FirestoreError(N.CANCELLED, t));
17010
17032
  }));
17011
- })), e.Eu.clear();
17033
+ })), e.Ru.clear();
17012
17034
  }(n, "'waitForPendingWrites' promise is rejected due to a user change."),
17013
17035
  // TODO(b/114226417): Consider calling this only in the primary tab.
17014
17036
  n.sharedClientState.handleUserChange(t, e.removedBatchIds, e.addedBatchIds), await __PRIVATE_syncEngineEmitNewSnapsAndNotifyLocalStore(n, e.ks);
@@ -17016,15 +17038,15 @@ async function __PRIVATE_syncEngineHandleCredentialChange(e, t) {
17016
17038
  }
17017
17039
 
17018
17040
  function __PRIVATE_syncEngineGetRemoteKeysForTarget(e, t) {
17019
- const n = __PRIVATE_debugCast(e), r = n.Pu.get(t);
17020
- if (r && r._u) return __PRIVATE_documentKeySet().add(r.key);
17041
+ const n = __PRIVATE_debugCast(e), r = n.Eu.get(t);
17042
+ if (r && r.cu) return __PRIVATE_documentKeySet().add(r.key);
17021
17043
  {
17022
17044
  let e = __PRIVATE_documentKeySet();
17023
- const r = n.cu.get(t);
17045
+ const r = n.Pu.get(t);
17024
17046
  if (!r) return e;
17025
17047
  for (const t of r) {
17026
- const r = n.uu.get(t);
17027
- e = e.unionWith(r.view.Ya);
17048
+ const r = n.hu.get(t);
17049
+ e = e.unionWith(r.view.eu);
17028
17050
  }
17029
17051
  return e;
17030
17052
  }
@@ -17035,8 +17057,8 @@ function __PRIVATE_syncEngineGetRemoteKeysForTarget(e, t) {
17035
17057
  * from persistence.
17036
17058
  */ async function __PRIVATE_synchronizeViewAndComputeSnapshot(e, t) {
17037
17059
  const n = __PRIVATE_debugCast(e), r = await __PRIVATE_localStoreExecuteQuery(n.localStore, t.query,
17038
- /* usePreviousResults= */ !0), i = t.view.su(r);
17039
- return n.isPrimaryClient && __PRIVATE_updateTrackedLimbos(n, t.targetId, i.ru),
17060
+ /* usePreviousResults= */ !0), i = t.view.au(r);
17061
+ return n.isPrimaryClient && __PRIVATE_updateTrackedLimbos(n, t.targetId, i.ou),
17040
17062
  i;
17041
17063
  }
17042
17064
 
@@ -17072,7 +17094,7 @@ async function __PRIVATE_syncEngineApplyBatchState(e, t, n, r) {
17072
17094
  }
17073
17095
  // PORTING NOTE: Multi-Tab only.
17074
17096
  (i.localStore, t)) : fail(6720, "Unknown batchState", {
17075
- fu: n
17097
+ yu: n
17076
17098
  }), await __PRIVATE_syncEngineEmitNewSnapsAndNotifyLocalStore(i, s)) :
17077
17099
  // A throttled tab may not have seen the mutation before it was completed
17078
17100
  // and removed from the mutation queue, in which case we won't have cached
@@ -17089,7 +17111,7 @@ async function __PRIVATE_syncEngineApplyBatchState(e, t, n, r) {
17089
17111
  async function __PRIVATE_syncEngineApplyPrimaryState(e, t) {
17090
17112
  const n = __PRIVATE_debugCast(e);
17091
17113
  if (__PRIVATE_ensureWatchCallbacks(n), __PRIVATE_syncEngineEnsureWriteCallbacks(n),
17092
- !0 === t && !0 !== n.Au) {
17114
+ !0 === t && !0 !== n.mu) {
17093
17115
  // Secondary tabs only maintain Views for their local listeners and the
17094
17116
  // Views internal state may not be 100% populated (in particular
17095
17117
  // secondary tabs don't track syncedDocuments, the set of documents the
@@ -17097,12 +17119,12 @@ async function __PRIVATE_syncEngineApplyPrimaryState(e, t) {
17097
17119
  // primary, we need to need to make sure that all views for all targets
  // primary, we need to make sure that all views for all targets
17098
17120
  // match the state on disk.
17099
17121
  const e = n.sharedClientState.getAllActiveQueryTargets(), t = await __PRIVATE_synchronizeQueryViewsAndRaiseSnapshots(n, e.toArray());
17100
- n.Au = !0, await __PRIVATE_remoteStoreApplyPrimaryState(n.remoteStore, !0);
17122
+ n.mu = !0, await __PRIVATE_remoteStoreApplyPrimaryState(n.remoteStore, !0);
17101
17123
  for (const e of t) __PRIVATE_remoteStoreListen(n.remoteStore, e);
17102
- } else if (!1 === t && !1 !== n.Au) {
17124
+ } else if (!1 === t && !1 !== n.mu) {
17103
17125
  const e = [];
17104
17126
  let t = Promise.resolve();
17105
- n.cu.forEach(((r, i) => {
17127
+ n.Pu.forEach(((r, i) => {
17106
17128
  n.sharedClientState.isLocalQueryTarget(i) ? e.push(i) : t = t.then((() => (__PRIVATE_removeAndCleanupTarget(n, i),
17107
17129
  __PRIVATE_localStoreReleaseTarget(n.localStore, i,
17108
17130
  /*keepPersistedTargetData=*/ !0)))), __PRIVATE_remoteStoreUnlisten(n.remoteStore, i);
@@ -17110,9 +17132,9 @@ async function __PRIVATE_syncEngineApplyPrimaryState(e, t) {
17110
17132
  // PORTING NOTE: Multi-Tab only.
17111
17133
  function __PRIVATE_resetLimboDocuments(e) {
17112
17134
  const t = __PRIVATE_debugCast(e);
17113
- t.Pu.forEach(((e, n) => {
17135
+ t.Eu.forEach(((e, n) => {
17114
17136
  __PRIVATE_remoteStoreUnlisten(t.remoteStore, n);
17115
- })), t.Tu.Jr(), t.Pu = new Map, t.hu = new SortedMap(DocumentKey.comparator);
17137
+ })), t.du.Jr(), t.Eu = new Map, t.Iu = new SortedMap(DocumentKey.comparator);
17116
17138
  }
17117
17139
  /**
17118
17140
  * Reconcile the query views of the provided query targets with the state from
@@ -17125,7 +17147,7 @@ async function __PRIVATE_syncEngineApplyPrimaryState(e, t) {
17125
17147
  * tab to a primary tab
17126
17148
  */
17127
17149
  // PORTING NOTE: Multi-Tab only.
17128
- (n), n.Au = !1, await __PRIVATE_remoteStoreApplyPrimaryState(n.remoteStore, !1);
17150
+ (n), n.mu = !1, await __PRIVATE_remoteStoreApplyPrimaryState(n.remoteStore, !1);
17129
17151
  }
17130
17152
  }
17131
17153
 
@@ -17133,7 +17155,7 @@ async function __PRIVATE_synchronizeQueryViewsAndRaiseSnapshots(e, t, n) {
17133
17155
  const r = __PRIVATE_debugCast(e), i = [], s = [];
17134
17156
  for (const e of t) {
17135
17157
  let t;
17136
- const n = r.cu.get(e);
17158
+ const n = r.Pu.get(e);
17137
17159
  if (n && 0 !== n.length) {
17138
17160
  // For queries that have a local View, we fetch their current state
17139
17161
  // from LocalStore (as the resume token and the snapshot version
@@ -17141,7 +17163,7 @@ async function __PRIVATE_synchronizeQueryViewsAndRaiseSnapshots(e, t, n) {
17141
17163
  // state (the list of syncedDocuments may have gotten out of sync).
17142
17164
  t = await __PRIVATE_localStoreAllocateTarget(r.localStore, __PRIVATE_queryToTarget(n[0]));
17143
17165
  for (const e of n) {
17144
- const t = r.uu.get(e), n = await __PRIVATE_synchronizeViewAndComputeSnapshot(r, t);
17166
+ const t = r.hu.get(e), n = await __PRIVATE_synchronizeViewAndComputeSnapshot(r, t);
17145
17167
  n.snapshot && s.push(n.snapshot);
17146
17168
  }
17147
17169
  } else {
@@ -17153,7 +17175,7 @@ async function __PRIVATE_synchronizeQueryViewsAndRaiseSnapshots(e, t, n) {
17153
17175
  }
17154
17176
  i.push(t);
17155
17177
  }
17156
- return r.au.j_(s), i;
17178
+ return r.lu.Y_(s), i;
17157
17179
  }
17158
17180
 
17159
17181
  /**
@@ -17183,11 +17205,11 @@ function __PRIVATE_syncEngineGetActiveClients(e) {
17183
17205
  // PORTING NOTE: Multi-Tab only.
17184
17206
  async function __PRIVATE_syncEngineApplyTargetState(e, t, n, r) {
17185
17207
  const i = __PRIVATE_debugCast(e);
17186
- if (i.Au)
17208
+ if (i.mu)
17187
17209
  // If we receive a target state notification via WebStorage, we are
17188
17210
  // either already secondary or another tab has taken the primary lease.
17189
17211
  return void __PRIVATE_logDebug(nn, "Ignoring unexpected query state notification.");
17190
- const s = i.cu.get(t);
17212
+ const s = i.Pu.get(t);
17191
17213
  if (s && s.length > 0) switch (n) {
17192
17214
  case "current":
17193
17215
  case "not-current":
@@ -17209,9 +17231,9 @@ async function __PRIVATE_syncEngineApplyTargetState(e, t, n, r) {
17209
17231
 
17210
17232
  /** Adds or removes Watch targets for queries from different tabs. */ async function __PRIVATE_syncEngineApplyActiveTargetsChange(e, t, n) {
17211
17233
  const r = __PRIVATE_ensureWatchCallbacks(e);
17212
- if (r.Au) {
17234
+ if (r.mu) {
17213
17235
  for (const e of t) {
17214
- if (r.cu.has(e) && r.sharedClientState.isActiveQueryTarget(e)) {
17236
+ if (r.Pu.has(e) && r.sharedClientState.isActiveQueryTarget(e)) {
17215
17237
  __PRIVATE_logDebug(nn, "Adding an already active target " + e);
17216
17238
  continue;
17217
17239
  }
@@ -17222,7 +17244,7 @@ async function __PRIVATE_syncEngineApplyTargetState(e, t, n, r) {
17222
17244
  for (const e of n)
17223
17245
  // Check that the target is still active since the target might have been
17224
17246
  // removed if it has been rejected by the backend.
17225
- r.cu.has(e) &&
17247
+ r.Pu.has(e) &&
17226
17248
  // Release queries that are still active.
17227
17249
  await __PRIVATE_localStoreReleaseTarget(r.localStore, e,
17228
17250
  /* keepPersistedTargetData */ !1).then((() => {
@@ -17236,7 +17258,7 @@ function __PRIVATE_ensureWatchCallbacks(e) {
17236
17258
  return t.remoteStore.remoteSyncer.applyRemoteEvent = __PRIVATE_syncEngineApplyRemoteEvent.bind(null, t),
17237
17259
  t.remoteStore.remoteSyncer.getRemoteKeysForTarget = __PRIVATE_syncEngineGetRemoteKeysForTarget.bind(null, t),
17238
17260
  t.remoteStore.remoteSyncer.rejectListen = __PRIVATE_syncEngineRejectListen.bind(null, t),
17239
- t.au.j_ = __PRIVATE_eventManagerOnWatchChange.bind(null, t.eventManager), t.au.Vu = __PRIVATE_eventManagerOnWatchError.bind(null, t.eventManager),
17261
+ t.lu.Y_ = __PRIVATE_eventManagerOnWatchChange.bind(null, t.eventManager), t.lu.gu = __PRIVATE_eventManagerOnWatchError.bind(null, t.eventManager),
17240
17262
  t;
17241
17263
  }
17242
17264
 
@@ -17279,13 +17301,13 @@ function __PRIVATE_syncEngineEnsureWriteCallbacks(e) {
17279
17301
  }(r)), Promise.resolve(new Set);
17280
17302
  n._updateProgress(__PRIVATE_bundleInitialProgress(r));
17281
17303
  const i = new __PRIVATE_BundleLoader(r, e.localStore, t.serializer);
17282
- let s = await t.gu();
17304
+ let s = await t.wu();
17283
17305
  for (;s; ) {
17284
- const e = await i.$a(s);
17285
- e && n._updateProgress(e), s = await t.gu();
17306
+ const e = await i.Wa(s);
17307
+ e && n._updateProgress(e), s = await t.wu();
17286
17308
  }
17287
17309
  const o = await i.complete();
17288
- return await __PRIVATE_syncEngineEmitNewSnapsAndNotifyLocalStore(e, o.Wa,
17310
+ return await __PRIVATE_syncEngineEmitNewSnapsAndNotifyLocalStore(e, o.ja,
17289
17311
  /* remoteEvent */ void 0),
17290
17312
  // Save metadata, so loading the same bundle will skip.
17291
17313
  await function __PRIVATE_localStoreSaveBundle(e, t) {
@@ -17295,7 +17317,7 @@ function __PRIVATE_syncEngineEnsureWriteCallbacks(e) {
17295
17317
  /**
17296
17318
  * Returns a promise of a `NamedQuery` associated with given query name. Promise
17297
17319
  * resolves to undefined if no persisted data can be found.
17298
- */ (e.localStore, r), n._completeWith(o.progress), Promise.resolve(o.Ka);
17320
+ */ (e.localStore, r), n._completeWith(o.progress), Promise.resolve(o.za);
17299
17321
  } catch (e) {
17300
17322
  return __PRIVATE_logWarn(nn, `Loading bundle failed with ${e}`), n._failWith(e),
17301
17323
  Promise.resolve(new Set);
@@ -17330,23 +17352,23 @@ class __PRIVATE_MemoryOfflineComponentProvider {
17330
17352
  this.kind = "memory", this.synchronizeTabs = !1;
17331
17353
  }
17332
17354
  async initialize(e) {
17333
- this.serializer = __PRIVATE_newSerializer(e.databaseInfo.databaseId), this.sharedClientState = this.pu(e),
17334
- this.persistence = this.yu(e), await this.persistence.start(), this.localStore = this.wu(e),
17335
- this.gcScheduler = this.Su(e, this.localStore), this.indexBackfillerScheduler = this.bu(e, this.localStore);
17355
+ this.serializer = __PRIVATE_newSerializer(e.databaseInfo.databaseId), this.sharedClientState = this.bu(e),
17356
+ this.persistence = this.Su(e), await this.persistence.start(), this.localStore = this.Du(e),
17357
+ this.gcScheduler = this.vu(e, this.localStore), this.indexBackfillerScheduler = this.Cu(e, this.localStore);
17336
17358
  }
17337
- Su(e, t) {
17359
+ vu(e, t) {
17338
17360
  return null;
17339
17361
  }
17340
- bu(e, t) {
17362
+ Cu(e, t) {
17341
17363
  return null;
17342
17364
  }
17343
- wu(e) {
17365
+ Du(e) {
17344
17366
  return __PRIVATE_newLocalStore(this.persistence, new __PRIVATE_QueryEngine, e.initialUser, this.serializer);
17345
17367
  }
17346
- yu(e) {
17368
+ Su(e) {
17347
17369
  return new __PRIVATE_MemoryPersistence(__PRIVATE_MemoryEagerDelegate.fi, this.serializer);
17348
17370
  }
17349
- pu(e) {
17371
+ bu(e) {
17350
17372
  return new __PRIVATE_MemorySharedClientState;
17351
17373
  }
17352
17374
  async terminate() {
@@ -17364,12 +17386,12 @@ class __PRIVATE_LruGcMemoryOfflineComponentProvider extends __PRIVATE_MemoryOffl
17364
17386
  constructor(e) {
17365
17387
  super(), this.cacheSizeBytes = e;
17366
17388
  }
17367
- Su(e, t) {
17389
+ vu(e, t) {
17368
17390
  __PRIVATE_hardAssert(this.persistence.referenceDelegate instanceof __PRIVATE_MemoryLruDelegate, 46915);
17369
17391
  const n = this.persistence.referenceDelegate.garbageCollector;
17370
17392
  return new __PRIVATE_LruScheduler(n, e.asyncQueue, t);
17371
17393
  }
17372
- yu(e) {
17394
+ Su(e) {
17373
17395
  const t = void 0 !== this.cacheSizeBytes ? LruParams.withCacheSize(this.cacheSizeBytes) : LruParams.DEFAULT;
17374
17396
  return new __PRIVATE_MemoryPersistence((e => __PRIVATE_MemoryLruDelegate.fi(e, t)), this.serializer);
17375
17397
  }
@@ -17379,35 +17401,35 @@ class __PRIVATE_LruGcMemoryOfflineComponentProvider extends __PRIVATE_MemoryOffl
17379
17401
  * Provides all components needed for Firestore with IndexedDB persistence.
17380
17402
  */ class __PRIVATE_IndexedDbOfflineComponentProvider extends __PRIVATE_MemoryOfflineComponentProvider {
17381
17403
  constructor(e, t, n) {
17382
- super(), this.Du = e, this.cacheSizeBytes = t, this.forceOwnership = n, this.kind = "persistent",
17404
+ super(), this.Fu = e, this.cacheSizeBytes = t, this.forceOwnership = n, this.kind = "persistent",
17383
17405
  this.synchronizeTabs = !1;
17384
17406
  }
17385
17407
  async initialize(e) {
17386
- await super.initialize(e), await this.Du.initialize(this, e),
17408
+ await super.initialize(e), await this.Fu.initialize(this, e),
17387
17409
  // Enqueue writes from a previous session
17388
- await __PRIVATE_syncEngineEnsureWriteCallbacks(this.Du.syncEngine), await __PRIVATE_fillWritePipeline(this.Du.remoteStore),
17410
+ await __PRIVATE_syncEngineEnsureWriteCallbacks(this.Fu.syncEngine), await __PRIVATE_fillWritePipeline(this.Fu.remoteStore),
17389
17411
  // NOTE: This will immediately call the listener, so we make sure to
17390
17412
  // set it after localStore / remoteStore are started.
17391
17413
  await this.persistence.Ji((() => (this.gcScheduler && !this.gcScheduler.started && this.gcScheduler.start(),
17392
17414
  this.indexBackfillerScheduler && !this.indexBackfillerScheduler.started && this.indexBackfillerScheduler.start(),
17393
17415
  Promise.resolve())));
17394
17416
  }
17395
- wu(e) {
17417
+ Du(e) {
17396
17418
  return __PRIVATE_newLocalStore(this.persistence, new __PRIVATE_QueryEngine, e.initialUser, this.serializer);
17397
17419
  }
17398
- Su(e, t) {
17420
+ vu(e, t) {
17399
17421
  const n = this.persistence.referenceDelegate.garbageCollector;
17400
17422
  return new __PRIVATE_LruScheduler(n, e.asyncQueue, t);
17401
17423
  }
17402
- bu(e, t) {
17424
+ Cu(e, t) {
17403
17425
  const n = new __PRIVATE_IndexBackfiller(t, this.persistence);
17404
17426
  return new __PRIVATE_IndexBackfillerScheduler(e.asyncQueue, n);
17405
17427
  }
17406
- yu(e) {
17428
+ Su(e) {
17407
17429
  const t = __PRIVATE_indexedDbStoragePrefix(e.databaseInfo.databaseId, e.databaseInfo.persistenceKey), n = void 0 !== this.cacheSizeBytes ? LruParams.withCacheSize(this.cacheSizeBytes) : LruParams.DEFAULT;
17408
17430
  return new __PRIVATE_IndexedDbPersistence(this.synchronizeTabs, t, e.clientId, n, e.asyncQueue, __PRIVATE_getWindow(), getDocument(), this.serializer, this.sharedClientState, !!this.forceOwnership);
17409
17431
  }
17410
- pu(e) {
17432
+ bu(e) {
17411
17433
  return new __PRIVATE_MemorySharedClientState;
17412
17434
  }
17413
17435
  }
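The offline component providers above change only their minified factory names; the layout is a template method: the base provider's initialize() wires shared client state, persistence, local store and schedulers in a fixed order, and each subclass overrides just the factories for the pieces it swaps out (IndexedDB persistence, LRU GC, index backfiller, WebStorage shared state). A rough sketch of that shape, with descriptive method names standing in for the minified ones:

// Sketch only: template-method layout of the offline component providers.
class MemoryProviderSketch {
  async initialize(config) {
    this.sharedClientState = this.createSharedClientState(config);
    this.persistence = this.createPersistence(config);
    await this.persistence.start();
    this.localStore = this.createLocalStore(config);
    this.gcScheduler = this.createGcScheduler(config, this.localStore);
    this.indexBackfillerScheduler = this.createIndexBackfillerScheduler(config, this.localStore);
  }
  createSharedClientState(config) { return {}; }                   // in-memory state
  createPersistence(config) { return { start: async () => {} }; }  // memory persistence
  createLocalStore(config) { return {}; }
  createGcScheduler(config, localStore) { return null; }           // memory: no GC scheduler
  createIndexBackfillerScheduler(config, localStore) { return null; }
}

class IndexedDbProviderSketch extends MemoryProviderSketch {
  // Only the factories change; the initialize() order stays the same.
  createPersistence(config) { return { start: async () => {/* open IndexedDB here */} }; }
  createGcScheduler(config, localStore) { return { start() {}, stop() {} }; }
  createIndexBackfillerScheduler(config, localStore) { return { start() {}, stop() {} }; }
}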
@@ -17421,11 +17443,11 @@ class __PRIVATE_LruGcMemoryOfflineComponentProvider extends __PRIVATE_MemoryOffl
17421
17443
  * `synchronizeTabs` will be enabled.
17422
17444
  */ class __PRIVATE_MultiTabOfflineComponentProvider extends __PRIVATE_IndexedDbOfflineComponentProvider {
17423
17445
  constructor(e, t) {
17424
- super(e, t, /* forceOwnership= */ !1), this.Du = e, this.cacheSizeBytes = t, this.synchronizeTabs = !0;
17446
+ super(e, t, /* forceOwnership= */ !1), this.Fu = e, this.cacheSizeBytes = t, this.synchronizeTabs = !0;
17425
17447
  }
17426
17448
  async initialize(e) {
17427
17449
  await super.initialize(e);
17428
- const t = this.Du.syncEngine;
17450
+ const t = this.Fu.syncEngine;
17429
17451
  this.sharedClientState instanceof __PRIVATE_WebStorageSharedClientState && (this.sharedClientState.syncEngine = {
17430
17452
  Co: __PRIVATE_syncEngineApplyBatchState.bind(null, t),
17431
17453
  Fo: __PRIVATE_syncEngineApplyTargetState.bind(null, t),
@@ -17436,11 +17458,11 @@ class __PRIVATE_LruGcMemoryOfflineComponentProvider extends __PRIVATE_MemoryOffl
17436
17458
  // NOTE: This will immediately call the listener, so we make sure to
17437
17459
  // set it after localStore / remoteStore are started.
17438
17460
  await this.persistence.Ji((async e => {
17439
- await __PRIVATE_syncEngineApplyPrimaryState(this.Du.syncEngine, e), this.gcScheduler && (e && !this.gcScheduler.started ? this.gcScheduler.start() : e || this.gcScheduler.stop()),
17461
+ await __PRIVATE_syncEngineApplyPrimaryState(this.Fu.syncEngine, e), this.gcScheduler && (e && !this.gcScheduler.started ? this.gcScheduler.start() : e || this.gcScheduler.stop()),
17440
17462
  this.indexBackfillerScheduler && (e && !this.indexBackfillerScheduler.started ? this.indexBackfillerScheduler.start() : e || this.indexBackfillerScheduler.stop());
17441
17463
  }));
17442
17464
  }
17443
- pu(e) {
17465
+ bu(e) {
17444
17466
  const t = __PRIVATE_getWindow();
17445
17467
  if (!__PRIVATE_WebStorageSharedClientState.C(t)) throw new FirestoreError(N.UNIMPLEMENTED, "IndexedDB persistence is only available on platforms that support LocalStorage.");
17446
17468
  const n = __PRIVATE_indexedDbStoragePrefix(e.databaseInfo.databaseId, e.databaseInfo.persistenceKey);
@@ -17487,18 +17509,18 @@ class __PRIVATE_LruGcMemoryOfflineComponentProvider extends __PRIVATE_MemoryOffl
17487
17509
  // PORTING NOTE: Manages state synchronization in multi-tab environments.
17488
17510
  r, i, s, o) {
17489
17511
  const _ = new __PRIVATE_SyncEngineImpl(e, t, n, r, i, s);
17490
- return o && (_.Au = !0), _;
17512
+ return o && (_.mu = !0), _;
17491
17513
  }(this.localStore, this.remoteStore, this.eventManager, this.sharedClientState, e.initialUser, e.maxConcurrentLimboResolutions, t);
17492
17514
  }
17493
17515
  async terminate() {
17494
17516
  var e, t;
17495
17517
  await async function __PRIVATE_remoteStoreShutdown(e) {
17496
17518
  const t = __PRIVATE_debugCast(e);
17497
- __PRIVATE_logDebug(Xt, "RemoteStore shutting down."), t.Ta.add(5 /* OfflineCause.Shutdown */),
17498
- await __PRIVATE_disableNetworkInternal(t), t.Ea.shutdown(),
17519
+ __PRIVATE_logDebug(Xt, "RemoteStore shutting down."), t.da.add(5 /* OfflineCause.Shutdown */),
17520
+ await __PRIVATE_disableNetworkInternal(t), t.Ra.shutdown(),
17499
17521
  // Set the OnlineState to Unknown (rather than Offline) to avoid potentially
17500
17522
  // triggering spurious listener events with cached data, etc.
17501
- t.da.set("Unknown" /* OnlineState.Unknown */);
17523
+ t.Va.set("Unknown" /* OnlineState.Unknown */);
17502
17524
  }(this.remoteStore), null === (e = this.datastore) || void 0 === e || e.terminate(),
17503
17525
  null === (t = this.eventManager) || void 0 === t || t.terminate();
17504
17526
  }
@@ -17590,15 +17612,15 @@ class __PRIVATE_AsyncObserver {
17590
17612
  this.muted = !1;
17591
17613
  }
17592
17614
  next(e) {
17593
- this.muted || this.observer.next && this.vu(this.observer.next, e);
17615
+ this.muted || this.observer.next && this.Mu(this.observer.next, e);
17594
17616
  }
17595
17617
  error(e) {
17596
- this.muted || (this.observer.error ? this.vu(this.observer.error, e) : __PRIVATE_logError("Uncaught Error in snapshot listener:", e.toString()));
17618
+ this.muted || (this.observer.error ? this.Mu(this.observer.error, e) : __PRIVATE_logError("Uncaught Error in snapshot listener:", e.toString()));
17597
17619
  }
17598
- Cu() {
17620
+ xu() {
17599
17621
  this.muted = !0;
17600
17622
  }
17601
- vu(e, t) {
17623
+ Mu(e, t) {
17602
17624
  setTimeout((() => {
17603
17625
  this.muted || e(t);
17604
17626
  }), 0);
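Only the mangled method names change in the observer wrapper above; its behavior is that callbacks are delivered on a later tick and dropped once the observer has been muted. A plain-JavaScript sketch of that wrapper, with non-minified, hypothetical names:

// Sketch only: an observer wrapper that defers delivery and can be muted.
class MutedAsyncObserverSketch {
  constructor(observer) {
    this.observer = observer;
    this.muted = false;
  }
  next(value) {
    if (!this.muted && this.observer.next) this.schedule(this.observer.next, value);
  }
  error(err) {
    if (this.muted) return;
    if (this.observer.error) this.schedule(this.observer.error, err);
    else console.error("Uncaught Error in snapshot listener:", String(err));
  }
  mute() { this.muted = true; }
  schedule(fn, arg) {
    // Deliver on a later tick and re-check `muted`, so late events are dropped.
    setTimeout(() => { if (!this.muted) fn(arg); }, 0);
  }
}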
@@ -17630,30 +17652,30 @@ class __PRIVATE_AsyncObserver {
17630
17652
  constructor(
17631
17653
  /** The reader to read from underlying binary bundle data source. */
17632
17654
  e, t) {
17633
- this.Fu = e, this.serializer = t,
17655
+ this.Ou = e, this.serializer = t,
17634
17656
  /** Cached bundle metadata. */
17635
17657
  this.metadata = new __PRIVATE_Deferred,
17636
17658
  /**
17637
17659
  * Internal buffer to hold bundle content, accumulating incomplete element
17638
17660
  * content.
17639
17661
  */
17640
- this.buffer = new Uint8Array, this.Mu = function __PRIVATE_newTextDecoder() {
17662
+ this.buffer = new Uint8Array, this.Nu = function __PRIVATE_newTextDecoder() {
17641
17663
  return new TextDecoder("utf-8");
17642
17664
  }(),
17643
17665
  // Read the metadata (which is the first element).
17644
- this.xu().then((e => {
17645
- e && e.qa() ? this.metadata.resolve(e.ka.metadata) : this.metadata.reject(new Error(`The first element of the bundle is not a metadata, it is\n ${JSON.stringify(null == e ? void 0 : e.ka)}`));
17666
+ this.Bu().then((e => {
17667
+ e && e.Ua() ? this.metadata.resolve(e.$a.metadata) : this.metadata.reject(new Error(`The first element of the bundle is not a metadata, it is\n ${JSON.stringify(null == e ? void 0 : e.$a)}`));
17646
17668
  }), (e => this.metadata.reject(e)));
17647
17669
  }
17648
17670
  close() {
17649
- return this.Fu.cancel();
17671
+ return this.Ou.cancel();
17650
17672
  }
17651
17673
  async getMetadata() {
17652
17674
  return this.metadata.promise;
17653
17675
  }
17654
- async gu() {
17676
+ async wu() {
17655
17677
  // Makes sure metadata is read before proceeding.
17656
- return await this.getMetadata(), this.xu();
17678
+ return await this.getMetadata(), this.Bu();
17657
17679
  }
17658
17680
  /**
17659
17681
  * Reads from the head of internal buffer, and pulling more data from
@@ -17664,15 +17686,15 @@ class __PRIVATE_AsyncObserver {
17664
17686
  *
17665
17687
  * Returns either the bundled element, or null if we have reached the end of
17666
17688
  * the stream.
17667
- */ async xu() {
17668
- const e = await this.Ou();
17689
+ */ async Bu() {
17690
+ const e = await this.Lu();
17669
17691
  if (null === e) return null;
17670
- const t = this.Mu.decode(e), n = Number(t);
17671
- isNaN(n) && this.Nu(`length string (${t}) is not valid number`);
17672
- const r = await this.Bu(n);
17692
+ const t = this.Nu.decode(e), n = Number(t);
17693
+ isNaN(n) && this.ku(`length string (${t}) is not valid number`);
17694
+ const r = await this.qu(n);
17673
17695
  return new __PRIVATE_SizedBundleElement(JSON.parse(r), e.length + n);
17674
17696
  }
17675
- /** First index of '{' from the underlying buffer. */ Lu() {
17697
+ /** First index of '{' from the underlying buffer. */ Qu() {
17676
17698
  return this.buffer.findIndex((e => e === "{".charCodeAt(0)));
17677
17699
  }
17678
17700
  /**
@@ -17680,17 +17702,17 @@ class __PRIVATE_AsyncObserver {
17680
17702
  * return the content.
17681
17703
  *
17682
17704
  * If reached end of the stream, returns a null.
17683
- */ async Ou() {
17684
- for (;this.Lu() < 0; ) {
17685
- if (await this.ku()) break;
17705
+ */ async Lu() {
17706
+ for (;this.Qu() < 0; ) {
17707
+ if (await this.$u()) break;
17686
17708
  }
17687
17709
  // Broke out of the loop because underlying stream is closed, and there
17688
17710
  // happens to be no more data to process.
17689
17711
  if (0 === this.buffer.length) return null;
17690
- const e = this.Lu();
17712
+ const e = this.Qu();
17691
17713
  // Broke out of the loop because underlying stream is closed, but still
17692
17714
  // cannot find an open bracket.
17693
- e < 0 && this.Nu("Reached the end of bundle when a length string is expected.");
17715
+ e < 0 && this.ku("Reached the end of bundle when a length string is expected.");
17694
17716
  const t = this.buffer.slice(0, e);
17695
17717
  // Update the internal buffer to drop the read length.
17696
17718
  return this.buffer = this.buffer.slice(e), t;
@@ -17700,23 +17722,23 @@ class __PRIVATE_AsyncObserver {
17700
17722
  * number of bytes, pulling more data from the underlying stream if needed.
17701
17723
  *
17702
17724
  * Returns a string decoded from the read bytes.
17703
- */ async Bu(e) {
17725
+ */ async qu(e) {
17704
17726
  for (;this.buffer.length < e; ) {
17705
- await this.ku() && this.Nu("Reached the end of bundle when more is expected.");
17727
+ await this.$u() && this.ku("Reached the end of bundle when more is expected.");
17706
17728
  }
17707
- const t = this.Mu.decode(this.buffer.slice(0, e));
17729
+ const t = this.Nu.decode(this.buffer.slice(0, e));
17708
17730
  // Update the internal buffer to drop the read json string.
17709
17731
  return this.buffer = this.buffer.slice(e), t;
17710
17732
  }
17711
- Nu(e) {
17733
+ ku(e) {
17712
17734
  // eslint-disable-next-line @typescript-eslint/no-floating-promises
17713
- throw this.Fu.cancel(), new Error(`Invalid bundle format: ${e}`);
17735
+ throw this.Ou.cancel(), new Error(`Invalid bundle format: ${e}`);
17714
17736
  }
17715
17737
  /**
17716
17738
  * Pulls more data from underlying stream to internal buffer.
17717
17739
  * Returns a boolean indicating whether the stream is finished.
17718
- */ async ku() {
17719
- const e = await this.Fu.read();
17740
+ */ async $u() {
17741
+ const e = await this.Ou.read();
17720
17742
  if (!e.done) {
17721
17743
  const t = new Uint8Array(this.buffer.length + e.value.length);
17722
17744
  t.set(this.buffer), t.set(e.value, this.buffer.length), this.buffer = t;
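The bundle-reader hunks above are identifier renames only; the framing being parsed does not change: a bundle is a stream of elements, each written as a decimal byte length followed immediately by a JSON object, with the first element carrying the bundle metadata. A simplified parser for one element of that framing (the helper name and return shape are illustrative, not the SDK's):

// Sketch only: read one length-prefixed JSON element from an accumulated byte buffer.
// Returns { element, bytesConsumed } or null if the buffer does not yet hold a full element.
const utf8 = new TextDecoder("utf-8");

function tryReadBundleElement(buffer /* Uint8Array */) {
  const open = buffer.indexOf("{".charCodeAt(0));
  if (open < 0) return null; // length prefix not fully buffered yet
  const length = Number(utf8.decode(buffer.slice(0, open)));
  if (Number.isNaN(length)) throw new Error("Invalid bundle format: bad length prefix");
  if (buffer.length < open + length) return null; // JSON body not fully buffered yet
  const json = utf8.decode(buffer.slice(open, open + length));
  return { element: JSON.parse(json), bytesConsumed: open + length };
}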
@@ -17821,7 +17843,7 @@ class Transaction$2 {
17821
17843
  let t;
17822
17844
  if (e.isFoundDocument()) t = e.version; else {
17823
17845
  if (!e.isNoDocument()) throw fail(50498, {
17824
- qu: e.constructor.name
17846
+ Uu: e.constructor.name
17825
17847
  });
17826
17848
  // Represent a deleted doc using SnapshotVersion.min().
17827
17849
  t = SnapshotVersion.min();
@@ -17893,26 +17915,26 @@ class Transaction$2 {
17893
17915
  */ class __PRIVATE_TransactionRunner {
17894
17916
  constructor(e, t, n, r, i) {
17895
17917
  this.asyncQueue = e, this.datastore = t, this.options = n, this.updateFunction = r,
17896
- this.deferred = i, this.Qu = n.maxAttempts, this.C_ = new __PRIVATE_ExponentialBackoff(this.asyncQueue, "transaction_retry" /* TimerId.TransactionRetry */);
17918
+ this.deferred = i, this.Ku = n.maxAttempts, this.x_ = new __PRIVATE_ExponentialBackoff(this.asyncQueue, "transaction_retry" /* TimerId.TransactionRetry */);
17897
17919
  }
17898
- /** Runs the transaction and sets the result on deferred. */ $u() {
17899
- this.Qu -= 1, this.Uu();
17920
+ /** Runs the transaction and sets the result on deferred. */ Wu() {
17921
+ this.Ku -= 1, this.Gu();
17900
17922
  }
17901
- Uu() {
17902
- this.C_.f_((async () => {
17903
- const e = new Transaction$2(this.datastore), t = this.Ku(e);
17923
+ Gu() {
17924
+ this.x_.y_((async () => {
17925
+ const e = new Transaction$2(this.datastore), t = this.zu(e);
17904
17926
  t && t.then((t => {
17905
17927
  this.asyncQueue.enqueueAndForget((() => e.commit().then((() => {
17906
17928
  this.deferred.resolve(t);
17907
17929
  })).catch((e => {
17908
- this.Wu(e);
17930
+ this.ju(e);
17909
17931
  }))));
17910
17932
  })).catch((e => {
17911
- this.Wu(e);
17933
+ this.ju(e);
17912
17934
  }));
17913
17935
  }));
17914
17936
  }
17915
- Ku(e) {
17937
+ zu(e) {
17916
17938
  try {
17917
17939
  const t = this.updateFunction(e);
17918
17940
  return !__PRIVATE_isNullOrUndefined(t) && t.catch && t.then ? t : (this.deferred.reject(Error("Transaction callback must return a Promise")),
@@ -17922,11 +17944,11 @@ class Transaction$2 {
17922
17944
  return this.deferred.reject(e), null;
17923
17945
  }
17924
17946
  }
17925
- Wu(e) {
17926
- this.Qu > 0 && this.Gu(e) ? (this.Qu -= 1, this.asyncQueue.enqueueAndForget((() => (this.Uu(),
17947
+ ju(e) {
17948
+ this.Ku > 0 && this.Hu(e) ? (this.Ku -= 1, this.asyncQueue.enqueueAndForget((() => (this.Gu(),
17927
17949
  Promise.resolve())))) : this.deferred.reject(e);
17928
17950
  }
17929
- Gu(e) {
17951
+ Hu(e) {
17930
17952
  if ("FirebaseError" === e.name) {
17931
17953
  // In transactions, the backend will fail outdated reads with FAILED_PRECONDITION and
17932
17954
  // non-matching document versions with ABORTED. These errors should be retried.
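As the comment above says, only contention errors are retried, and only while attempts remain. A condensed sketch of that retry loop, with the exponential backoff reduced to a plain delay; the error-code strings and helper names are assumptions, not copied from the SDK:

// Sketch only: retry a transaction callback on contention errors, up to maxAttempts.
const RETRYABLE_CODES = new Set(["failed-precondition", "aborted"]); // assumed string forms of the codes above

async function runTransactionWithRetry(startTransaction, updateFunction, maxAttempts, baseDelayMs = 250) {
  let lastError;
  for (let attempt = 0; attempt < maxAttempts; attempt++) {
    const txn = startTransaction(); // assumed factory for a fresh transaction object
    try {
      const result = await updateFunction(txn);
      await txn.commit();
      return result;
    } catch (err) {
      lastError = err;
      const retryable = err && err.name === "FirebaseError" && RETRYABLE_CODES.has(err.code);
      if (!retryable) throw err;
      await new Promise((resolve) => setTimeout(resolve, baseDelayMs * 2 ** attempt));
    }
  }
  throw lastError;
}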
@@ -18104,7 +18126,7 @@ async function __PRIVATE_getEventManager(e) {
18104
18126
  const t = await __PRIVATE_getPersistence(e), n = await __PRIVATE_getRemoteStore(e);
18105
18127
  return t.setNetworkEnabled(!0), function __PRIVATE_remoteStoreEnableNetwork(e) {
18106
18128
  const t = __PRIVATE_debugCast(e);
18107
- return t.Ta.delete(0 /* OfflineCause.UserDisabled */), __PRIVATE_enableNetworkInternal(t);
18129
+ return t.da.delete(0 /* OfflineCause.UserDisabled */), __PRIVATE_enableNetworkInternal(t);
18108
18130
  }(n);
18109
18131
  }));
18110
18132
  }
@@ -18114,9 +18136,9 @@ async function __PRIVATE_getEventManager(e) {
18114
18136
  const t = await __PRIVATE_getPersistence(e), n = await __PRIVATE_getRemoteStore(e);
18115
18137
  return t.setNetworkEnabled(!1), async function __PRIVATE_remoteStoreDisableNetwork(e) {
18116
18138
  const t = __PRIVATE_debugCast(e);
18117
- t.Ta.add(0 /* OfflineCause.UserDisabled */), await __PRIVATE_disableNetworkInternal(t),
18139
+ t.da.add(0 /* OfflineCause.UserDisabled */), await __PRIVATE_disableNetworkInternal(t),
18118
18140
  // Set the OnlineState to Offline so get()s return from cache, etc.
18119
- t.da.set("Offline" /* OnlineState.Offline */);
18141
+ t.Va.set("Offline" /* OnlineState.Offline */);
18120
18142
  }(n);
18121
18143
  }));
18122
18144
  }
@@ -18152,7 +18174,7 @@ function __PRIVATE_firestoreClientGetDocumentViaSnapshotListener(e, t, n = {}) {
18152
18174
  next: _ => {
18153
18175
  // Mute and remove query first before passing event to user to avoid
18154
18176
  // user actions affecting the now stale query.
18155
- s.Cu(), t.enqueueAndForget((() => __PRIVATE_eventManagerUnlisten(e, o)));
18177
+ s.xu(), t.enqueueAndForget((() => __PRIVATE_eventManagerUnlisten(e, o)));
18156
18178
  const a = _.docs.has(n);
18157
18179
  !a && _.fromCache ?
18158
18180
  // TODO(dimond): If we're online and the document doesn't
@@ -18167,7 +18189,7 @@ function __PRIVATE_firestoreClientGetDocumentViaSnapshotListener(e, t, n = {}) {
18167
18189
  error: e => i.reject(e)
18168
18190
  }), o = new __PRIVATE_QueryListener(__PRIVATE_newQueryForPath(n.path), s, {
18169
18191
  includeMetadataChanges: !0,
18170
- La: !0
18192
+ Qa: !0
18171
18193
  });
18172
18194
  return __PRIVATE_eventManagerListen(e, o);
18173
18195
  }(await __PRIVATE_getEventManager(e), e.asyncQueue, t, n, r))), r.promise;
@@ -18178,7 +18200,7 @@ function __PRIVATE_firestoreClientGetDocumentsFromLocalCache(e, t) {
18178
18200
  return e.asyncQueue.enqueueAndForget((async () => async function __PRIVATE_executeQueryFromCache(e, t, n) {
18179
18201
  try {
18180
18202
  const r = await __PRIVATE_localStoreExecuteQuery(e, t,
18181
- /* usePreviousResults= */ !0), i = new __PRIVATE_View(t, r.$s), s = i.Za(r.documents), o = i.applyChanges(s,
18203
+ /* usePreviousResults= */ !0), i = new __PRIVATE_View(t, r.$s), s = i.tu(r.documents), o = i.applyChanges(s,
18182
18204
  /* limboResolutionEnabled= */ !1);
18183
18205
  n.resolve(o.snapshot);
18184
18206
  } catch (e) {
@@ -18199,12 +18221,12 @@ function __PRIVATE_firestoreClientGetDocumentsViaSnapshotListener(e, t, n = {})
18199
18221
  next: n => {
18200
18222
  // Mute and remove query first before passing event to user to avoid
18201
18223
  // user actions affecting the now stale query.
18202
- s.Cu(), t.enqueueAndForget((() => __PRIVATE_eventManagerUnlisten(e, o))), n.fromCache && "server" === r.source ? i.reject(new FirestoreError(N.UNAVAILABLE, 'Failed to get documents from server. (However, these documents may exist in the local cache. Run again without setting source to "server" to retrieve the cached documents.)')) : i.resolve(n);
18224
+ s.xu(), t.enqueueAndForget((() => __PRIVATE_eventManagerUnlisten(e, o))), n.fromCache && "server" === r.source ? i.reject(new FirestoreError(N.UNAVAILABLE, 'Failed to get documents from server. (However, these documents may exist in the local cache. Run again without setting source to "server" to retrieve the cached documents.)')) : i.resolve(n);
18203
18225
  },
18204
18226
  error: e => i.reject(e)
18205
18227
  }), o = new __PRIVATE_QueryListener(n, s, {
18206
18228
  includeMetadataChanges: !0,
18207
- La: !0
18229
+ Qa: !0
18208
18230
  });
18209
18231
  return __PRIVATE_eventManagerListen(e, o);
18210
18232
  }(await __PRIVATE_getEventManager(e), e.asyncQueue, t, n, r))), r.promise;
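Both of the "get" helpers in the hunks above follow one shape: register a snapshot listener, and on the first event mute the observer, remove the listener, and settle a promise. A generic sketch of that one-shot wrapper; `listen` is an assumed stand-in that returns an unsubscribe function:

// Sketch only: turn a snapshot listener API into a one-shot promise.
function getOnce(listen) {
  return new Promise((resolve, reject) => {
    let unsubscribe;
    let settled = false;
    const settle = (complete, value) => {
      if (settled) return;            // "mute": ignore any later events
      settled = true;
      if (unsubscribe) unsubscribe(); // tear the listener down before handing data to the caller
      complete(value);
    };
    unsubscribe = listen({
      next: (snapshot) => settle(resolve, snapshot),
      error: (err) => settle(reject, err),
    });
  });
}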
@@ -18242,13 +18264,13 @@ function __PRIVATE_firestoreClientRunAggregateQuery(e, t, n) {
18242
18264
  function __PRIVATE_firestoreClientAddSnapshotsInSyncListener(e, t) {
18243
18265
  const n = new __PRIVATE_AsyncObserver(t);
18244
18266
  return e.asyncQueue.enqueueAndForget((async () => function __PRIVATE_addSnapshotsInSyncListener(e, t) {
18245
- __PRIVATE_debugCast(e).ba.add(t),
18267
+ __PRIVATE_debugCast(e).Ca.add(t),
18246
18268
  // Immediately fire an initial event, indicating all existing listeners
18247
18269
  // are in-sync.
18248
18270
  t.next();
18249
18271
  }(await __PRIVATE_getEventManager(e), n))), () => {
18250
- n.Cu(), e.asyncQueue.enqueueAndForget((async () => function __PRIVATE_removeSnapshotsInSyncListener(e, t) {
18251
- __PRIVATE_debugCast(e).ba.delete(t);
18272
+ n.xu(), e.asyncQueue.enqueueAndForget((async () => function __PRIVATE_removeSnapshotsInSyncListener(e, t) {
18273
+ __PRIVATE_debugCast(e).Ca.delete(t);
18252
18274
  }(await __PRIVATE_getEventManager(e), n)));
18253
18275
  };
18254
18276
  }
@@ -18936,37 +18958,37 @@ class __PRIVATE_AsyncQueueImpl {
18936
18958
  constructor(e = Promise.resolve()) {
18937
18959
  // A list of retryable operations. Retryable operations are run in order and
18938
18960
  // retried with backoff.
18939
- this.zu = [],
18961
+ this.Ju = [],
18940
18962
  // Is this AsyncQueue being shut down? Once it is set to true, it will not
18941
18963
  // be changed again.
18942
- this.ju = !1,
18964
+ this.Yu = !1,
18943
18965
  // Operations scheduled to be queued in the future. Operations are
18944
18966
  // automatically removed after they are run or canceled.
18945
- this.Hu = [],
18967
+ this.Zu = [],
18946
18968
  // visible for testing
18947
- this.Ju = null,
18969
+ this.Xu = null,
18948
18970
  // Flag set while there's an outstanding AsyncQueue operation, used for
18949
18971
  // assertion sanity-checks.
18950
- this.Yu = !1,
18972
+ this.ec = !1,
18951
18973
  // Enabled during shutdown on Safari to prevent future access to IndexedDB.
18952
- this.Zu = !1,
18974
+ this.tc = !1,
18953
18975
  // List of TimerIds to fast-forward delays for.
18954
- this.Xu = [],
18976
+ this.nc = [],
18955
18977
  // Backoff timer used to schedule retries for retryable operations
18956
- this.C_ = new __PRIVATE_ExponentialBackoff(this, "async_queue_retry" /* TimerId.AsyncQueueRetry */),
18978
+ this.x_ = new __PRIVATE_ExponentialBackoff(this, "async_queue_retry" /* TimerId.AsyncQueueRetry */),
18957
18979
  // Visibility handler that triggers an immediate retry of all retryable
18958
18980
  // operations. Meant to speed up recovery when we regain file system access
18959
18981
  // after page comes into foreground.
18960
- this.ec = () => {
18982
+ this.rc = () => {
18961
18983
  const e = getDocument();
18962
18984
  e && __PRIVATE_logDebug(an, "Visibility state changed to " + e.visibilityState),
18963
- this.C_.p_();
18964
- }, this.tc = e;
18985
+ this.x_.b_();
18986
+ }, this.sc = e;
18965
18987
  const t = getDocument();
18966
- t && "function" == typeof t.addEventListener && t.addEventListener("visibilitychange", this.ec);
18988
+ t && "function" == typeof t.addEventListener && t.addEventListener("visibilitychange", this.rc);
18967
18989
  }
18968
18990
  get isShuttingDown() {
18969
- return this.ju;
18991
+ return this.Yu;
18970
18992
  }
18971
18993
  /**
18972
18994
  * Adds a new operation to the queue without waiting for it to complete (i.e.
@@ -18976,44 +18998,44 @@ class __PRIVATE_AsyncQueueImpl {
18976
18998
  this.enqueue(e);
18977
18999
  }
18978
19000
  enqueueAndForgetEvenWhileRestricted(e) {
18979
- this.nc(),
19001
+ this.oc(),
18980
19002
  // eslint-disable-next-line @typescript-eslint/no-floating-promises
18981
- this.rc(e);
19003
+ this._c(e);
18982
19004
  }
18983
19005
  enterRestrictedMode(e) {
18984
- if (!this.ju) {
18985
- this.ju = !0, this.Zu = e || !1;
19006
+ if (!this.Yu) {
19007
+ this.Yu = !0, this.tc = e || !1;
18986
19008
  const t = getDocument();
18987
- t && "function" == typeof t.removeEventListener && t.removeEventListener("visibilitychange", this.ec);
19009
+ t && "function" == typeof t.removeEventListener && t.removeEventListener("visibilitychange", this.rc);
18988
19010
  }
18989
19011
  }
18990
19012
  enqueue(e) {
18991
- if (this.nc(), this.ju)
19013
+ if (this.oc(), this.Yu)
18992
19014
  // Return a Promise which never resolves.
18993
19015
  return new Promise((() => {}));
18994
19016
  // Create a deferred Promise that we can return to the callee. This
18995
19017
  // allows us to return a "hanging Promise" only to the callee and still
18996
19018
  // advance the queue even when the operation is not run.
18997
19019
  const t = new __PRIVATE_Deferred;
18998
- return this.rc((() => this.ju && this.Zu ? Promise.resolve() : (e().then(t.resolve, t.reject),
19020
+ return this._c((() => this.Yu && this.tc ? Promise.resolve() : (e().then(t.resolve, t.reject),
18999
19021
  t.promise))).then((() => t.promise));
19000
19022
  }
19001
19023
  enqueueRetryable(e) {
19002
- this.enqueueAndForget((() => (this.zu.push(e), this.sc())));
19024
+ this.enqueueAndForget((() => (this.Ju.push(e), this.ac())));
19003
19025
  }
19004
19026
  /**
19005
19027
  * Runs the next operation from the retryable queue. If the operation fails,
19006
19028
  * reschedules with backoff.
19007
- */ async sc() {
19008
- if (0 !== this.zu.length) {
19029
+ */ async ac() {
19030
+ if (0 !== this.Ju.length) {
19009
19031
  try {
19010
- await this.zu[0](), this.zu.shift(), this.C_.reset();
19032
+ await this.Ju[0](), this.Ju.shift(), this.x_.reset();
19011
19033
  } catch (e) {
19012
19034
  if (!__PRIVATE_isIndexedDbTransactionError(e)) throw e;
19013
19035
  // Failure will be handled by AsyncQueue
19014
19036
  __PRIVATE_logDebug(an, "Operation failed with retryable error: " + e);
19015
19037
  }
19016
- this.zu.length > 0 &&
19038
+ this.Ju.length > 0 &&
19017
19039
  // If there are additional operations, we re-schedule `retryNextOp()`.
19018
19040
  // This is necessary to run retryable operations that failed during
19019
19041
  // their initial attempt since we don't know whether they are already
@@ -19024,51 +19046,51 @@ class __PRIVATE_AsyncQueueImpl {
19024
19046
  // Since `backoffAndRun()` cancels an existing backoff and schedules a
19025
19047
  // new backoff on every call, there is only ever a single additional
19026
19048
  // operation in the queue.
19027
- this.C_.f_((() => this.sc()));
19049
+ this.x_.y_((() => this.ac()));
19028
19050
  }
19029
19051
  }
19030
- rc(e) {
19031
- const t = this.tc.then((() => (this.Yu = !0, e().catch((e => {
19032
- this.Ju = e, this.Yu = !1;
19052
+ _c(e) {
19053
+ const t = this.sc.then((() => (this.ec = !0, e().catch((e => {
19054
+ this.Xu = e, this.ec = !1;
19033
19055
  // Re-throw the error so that this.tail becomes a rejected Promise and
19034
19056
  // all further attempts to chain (via .then) will just short-circuit
19035
19057
  // and return the rejected Promise.
19036
19058
  throw __PRIVATE_logError("INTERNAL UNHANDLED ERROR: ", __PRIVATE_getMessageOrStack(e)),
19037
19059
  e;
19038
- })).then((e => (this.Yu = !1, e))))));
19039
- return this.tc = t, t;
19060
+ })).then((e => (this.ec = !1, e))))));
19061
+ return this.sc = t, t;
19040
19062
  }
19041
19063
  enqueueAfterDelay(e, t, n) {
19042
- this.nc(),
19064
+ this.oc(),
19043
19065
  // Fast-forward delays for timerIds that have been overridden.
19044
- this.Xu.indexOf(e) > -1 && (t = 0);
19045
- const r = DelayedOperation.createAndSchedule(this, e, t, n, (e => this.oc(e)));
19046
- return this.Hu.push(r), r;
19066
+ this.nc.indexOf(e) > -1 && (t = 0);
19067
+ const r = DelayedOperation.createAndSchedule(this, e, t, n, (e => this.uc(e)));
19068
+ return this.Zu.push(r), r;
19047
19069
  }
19048
- nc() {
19049
- this.Ju && fail(47125, {
19050
- _c: __PRIVATE_getMessageOrStack(this.Ju)
19070
+ oc() {
19071
+ this.Xu && fail(47125, {
19072
+ cc: __PRIVATE_getMessageOrStack(this.Xu)
19051
19073
  });
19052
19074
  }
19053
19075
  verifyOperationInProgress() {}
19054
19076
  /**
19055
19077
  * Waits until all currently queued tasks are finished executing. Delayed
19056
19078
  * operations are not run.
19057
- */ async ac() {
19079
+ */ async lc() {
19058
19080
  // Operations in the queue prior to draining may have enqueued additional
19059
19081
  // operations. Keep draining the queue until the tail is no longer advanced,
19060
19082
  // which indicates that no more new operations were enqueued and that all
19061
19083
  // operations were executed.
19062
19084
  let e;
19063
19085
  do {
19064
- e = this.tc, await e;
19065
- } while (e !== this.tc);
19086
+ e = this.sc, await e;
19087
+ } while (e !== this.sc);
19066
19088
  }
19067
19089
  /**
19068
19090
  * For Tests: Determine if a delayed operation with a particular TimerId
19069
19091
  * exists.
19070
- */ uc(e) {
19071
- for (const t of this.Hu) if (t.timerId === e) return !0;
19092
+ */ hc(e) {
19093
+ for (const t of this.Zu) if (t.timerId === e) return !0;
19072
19094
  return !1;
19073
19095
  }
19074
19096
  /**
@@ -19077,25 +19099,25 @@ class __PRIVATE_AsyncQueueImpl {
19077
19099
  * @param lastTimerId - Delayed operations up to and including this TimerId
19078
19100
  * will be drained. Pass TimerId.All to run all delayed operations.
19079
19101
  * @returns a Promise that resolves once all operations have been run.
19080
- */ cc(e) {
19102
+ */ Pc(e) {
19081
19103
  // Note that draining may generate more delayed ops, so we do that first.
19082
- return this.ac().then((() => {
19104
+ return this.lc().then((() => {
19083
19105
  // Run ops in the same order they'd run if they ran naturally.
19084
19106
  /* eslint-disable-next-line @typescript-eslint/no-floating-promises */
19085
- this.Hu.sort(((e, t) => e.targetTimeMs - t.targetTimeMs));
19086
- for (const t of this.Hu) if (t.skipDelay(), "all" /* TimerId.All */ !== e && t.timerId === e) break;
19087
- return this.ac();
19107
+ this.Zu.sort(((e, t) => e.targetTimeMs - t.targetTimeMs));
19108
+ for (const t of this.Zu) if (t.skipDelay(), "all" /* TimerId.All */ !== e && t.timerId === e) break;
19109
+ return this.lc();
19088
19110
  }));
19089
19111
  }
19090
19112
  /**
19091
19113
  * For Tests: Skip all subsequent delays for a timer id.
19092
- */ lc(e) {
19093
- this.Xu.push(e);
19114
+ */ Tc(e) {
19115
+ this.nc.push(e);
19094
19116
  }
19095
- /** Called once a DelayedOperation is run or canceled. */ oc(e) {
19117
+ /** Called once a DelayedOperation is run or canceled. */ uc(e) {
19096
19118
  // NOTE: indexOf / slice are O(n), but delayedOperations is expected to be small.
19097
- const t = this.Hu.indexOf(e);
19098
- /* eslint-disable-next-line @typescript-eslint/no-floating-promises */ this.Hu.splice(t, 1);
19119
+ const t = this.Zu.indexOf(e);
19120
+ /* eslint-disable-next-line @typescript-eslint/no-floating-promises */ this.Zu.splice(t, 1);
19099
19121
  }
19100
19122
  }
19101
19123
 
@@ -19946,7 +19968,7 @@ function __PRIVATE_isWrite(e) {
19946
19968
 
19947
19969
  default:
19948
19970
  throw fail(40011, {
19949
- hc: e
19971
+ Ic: e
19950
19972
  });
19951
19973
  }
19952
19974
  }
@@ -19974,55 +19996,55 @@ function __PRIVATE_isWrite(e) {
19974
19996
  this.settings = e, this.databaseId = t, this.serializer = n, this.ignoreUndefinedProperties = r,
19975
19997
  // Minor hack: If fieldTransforms is undefined, we assume this is an
19976
19998
  // external call and we need to validate the entire path.
19977
- void 0 === i && this.Pc(), this.fieldTransforms = i || [], this.fieldMask = s || [];
19999
+ void 0 === i && this.Ec(), this.fieldTransforms = i || [], this.fieldMask = s || [];
19978
20000
  }
19979
20001
  get path() {
19980
20002
  return this.settings.path;
19981
20003
  }
19982
- get hc() {
19983
- return this.settings.hc;
20004
+ get Ic() {
20005
+ return this.settings.Ic;
19984
20006
  }
19985
- /** Returns a new context with the specified settings overwritten. */ Tc(e) {
20007
+ /** Returns a new context with the specified settings overwritten. */ dc(e) {
19986
20008
  return new __PRIVATE_ParseContextImpl(Object.assign(Object.assign({}, this.settings), e), this.databaseId, this.serializer, this.ignoreUndefinedProperties, this.fieldTransforms, this.fieldMask);
19987
20009
  }
19988
- Ic(e) {
20010
+ Ac(e) {
19989
20011
  var t;
19990
- const n = null === (t = this.path) || void 0 === t ? void 0 : t.child(e), r = this.Tc({
20012
+ const n = null === (t = this.path) || void 0 === t ? void 0 : t.child(e), r = this.dc({
19991
20013
  path: n,
19992
- Ec: !1
20014
+ Rc: !1
19993
20015
  });
19994
- return r.dc(e), r;
20016
+ return r.Vc(e), r;
19995
20017
  }
19996
- Ac(e) {
20018
+ mc(e) {
19997
20019
  var t;
19998
- const n = null === (t = this.path) || void 0 === t ? void 0 : t.child(e), r = this.Tc({
20020
+ const n = null === (t = this.path) || void 0 === t ? void 0 : t.child(e), r = this.dc({
19999
20021
  path: n,
20000
- Ec: !1
20022
+ Rc: !1
20001
20023
  });
20002
- return r.Pc(), r;
20024
+ return r.Ec(), r;
20003
20025
  }
20004
- Rc(e) {
20026
+ fc(e) {
20005
20027
  // TODO(b/34871131): We don't support array paths right now; so make path
20006
20028
  // undefined.
20007
- return this.Tc({
20029
+ return this.dc({
20008
20030
  path: void 0,
20009
- Ec: !0
20031
+ Rc: !0
20010
20032
  });
20011
20033
  }
20012
- Vc(e) {
20013
- return __PRIVATE_createError(e, this.settings.methodName, this.settings.mc || !1, this.path, this.settings.fc);
20034
+ gc(e) {
20035
+ return __PRIVATE_createError(e, this.settings.methodName, this.settings.yc || !1, this.path, this.settings.wc);
20014
20036
  }
20015
20037
  /** Returns 'true' if 'fieldPath' was traversed when creating this context. */ contains(e) {
20016
20038
  return void 0 !== this.fieldMask.find((t => e.isPrefixOf(t))) || void 0 !== this.fieldTransforms.find((t => e.isPrefixOf(t.field)));
20017
20039
  }
20018
- Pc() {
20040
+ Ec() {
20019
20041
  // TODO(b/34871131): Remove null check once we have proper paths for fields
20020
20042
  // within arrays.
20021
- if (this.path) for (let e = 0; e < this.path.length; e++) this.dc(this.path.get(e));
20043
+ if (this.path) for (let e = 0; e < this.path.length; e++) this.Vc(this.path.get(e));
20022
20044
  }
20023
- dc(e) {
20024
- if (0 === e.length) throw this.Vc("Document fields must not be empty");
20025
- if (__PRIVATE_isWrite(this.hc) && cn.test(e)) throw this.Vc('Document fields cannot begin and end with "__"');
20045
+ Vc(e) {
20046
+ if (0 === e.length) throw this.gc("Document fields must not be empty");
20047
+ if (__PRIVATE_isWrite(this.Ic) && cn.test(e)) throw this.gc('Document fields cannot begin and end with "__"');
20026
20048
  }
20027
20049
  }
20028
20050
 
@@ -20033,14 +20055,14 @@ function __PRIVATE_isWrite(e) {
20033
20055
  constructor(e, t, n) {
20034
20056
  this.databaseId = e, this.ignoreUndefinedProperties = t, this.serializer = n || __PRIVATE_newSerializer(e);
20035
20057
  }
20036
- /** Creates a new top-level parse context. */ gc(e, t, n, r = !1) {
20058
+ /** Creates a new top-level parse context. */ bc(e, t, n, r = !1) {
20037
20059
  return new __PRIVATE_ParseContextImpl({
20038
- hc: e,
20060
+ Ic: e,
20039
20061
  methodName: t,
20040
- fc: n,
20062
+ wc: n,
20041
20063
  path: FieldPath$1.emptyPath(),
20042
- Ec: !1,
20043
- mc: r
20064
+ Rc: !1,
20065
+ yc: r
20044
20066
  }, this.databaseId, this.serializer, this.ignoreUndefinedProperties);
20045
20067
  }
20046
20068
  }
@@ -20051,7 +20073,7 @@ function __PRIVATE_newUserDataReader(e) {
20051
20073
  }
20052
20074
 
20053
20075
  /** Parse document data from a set() call. */ function __PRIVATE_parseSetData(e, t, n, r, i, s = {}) {
20054
- const o = e.gc(s.merge || s.mergeFields ? 2 /* UserDataSource.MergeSet */ : 0 /* UserDataSource.Set */ , t, n, i);
20076
+ const o = e.bc(s.merge || s.mergeFields ? 2 /* UserDataSource.MergeSet */ : 0 /* UserDataSource.Set */ , t, n, i);
20055
20077
  __PRIVATE_validatePlainObject("Data must be an object, but it was:", o, r);
20056
20078
  const _ = __PRIVATE_parseObject(r, o);
20057
20079
  let a, u;
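For context, __PRIVATE_parseSetData above backs the public setDoc() call; its SetOptions choose between the Set and MergeSet parse modes referenced in the hunk. A minimal TypeScript sketch, assuming an already-initialized default Firebase app and a hypothetical users/alice document:

import { getFirestore, doc, setDoc } from "firebase/firestore";

const db = getFirestore(); // assumes initializeApp() has already run
const alice = doc(db, "users/alice"); // hypothetical document path

// Plain set: replaces the whole document (UserDataSource.Set).
await setDoc(alice, { name: "Alice", visits: 1 });

// Merge set: only the supplied or listed fields end up in the field mask
// (UserDataSource.MergeSet); other fields are left untouched.
await setDoc(alice, { visits: 2 }, { merge: true });
await setDoc(alice, { name: "Alice A." }, { mergeFields: ["name"] });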
@@ -20069,7 +20091,7 @@ function __PRIVATE_newUserDataReader(e) {
20069
20091
 
20070
20092
  class __PRIVATE_DeleteFieldValueImpl extends FieldValue {
20071
20093
  _toFieldTransform(e) {
20072
- if (2 /* UserDataSource.MergeSet */ !== e.hc) throw 1 /* UserDataSource.Update */ === e.hc ? e.Vc(`${this._methodName}() can only appear at the top level of your update data`) : e.Vc(`${this._methodName}() cannot be used with set() unless you pass {merge:true}`);
20094
+ if (2 /* UserDataSource.MergeSet */ !== e.Ic) throw 1 /* UserDataSource.Update */ === e.Ic ? e.gc(`${this._methodName}() can only appear at the top level of your update data`) : e.gc(`${this._methodName}() cannot be used with set() unless you pass {merge:true}`);
20073
20095
  // No transform to add for a delete, but we need to add it to our
20074
20096
  // fieldMask so it gets deleted.
20075
20097
  return e.fieldMask.push(e.path), null;
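For context, __PRIVATE_DeleteFieldValueImpl above is the sentinel behind the public deleteField() helper, and the checks above spell out where it is legal. A minimal TypeScript sketch, assuming an initialized default app and a hypothetical users/alice document:

import { getFirestore, doc, setDoc, updateDoc, deleteField } from "firebase/firestore";

const db = getFirestore();
const alice = doc(db, "users/alice"); // hypothetical path

// Allowed: a top-level field of update() data. As the code above notes,
// the field is only added to the field mask, so the server deletes it.
await updateDoc(alice, { nickname: deleteField() });

// Allowed: set() with merge enabled (the MergeSet source).
await setDoc(alice, { nickname: deleteField() }, { merge: true });

// Rejected at parse time: set() without { merge: true } throws
// "deleteField() cannot be used with set() unless you pass {merge:true}".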
@@ -20096,10 +20118,10 @@ class __PRIVATE_DeleteFieldValueImpl extends FieldValue {
20096
20118
  * @param arrayElement - Whether or not the FieldValue has an array.
20097
20119
  */ function __PRIVATE_createSentinelChildContext(e, t, n) {
20098
20120
  return new __PRIVATE_ParseContextImpl({
20099
- hc: 3 /* UserDataSource.Argument */ ,
20100
- fc: t.settings.fc,
20121
+ Ic: 3 /* UserDataSource.Argument */ ,
20122
+ wc: t.settings.wc,
20101
20123
  methodName: e._methodName,
20102
- Ec: n
20124
+ Rc: n
20103
20125
  }, t.databaseId, t.serializer, t.ignoreUndefinedProperties);
20104
20126
  }
20105
20127
 
@@ -20114,47 +20136,47 @@ class __PRIVATE_ServerTimestampFieldValueImpl extends FieldValue {
20114
20136
 
20115
20137
  class __PRIVATE_ArrayUnionFieldValueImpl extends FieldValue {
20116
20138
  constructor(e, t) {
20117
- super(e), this.yc = t;
20139
+ super(e), this.Sc = t;
20118
20140
  }
20119
20141
  _toFieldTransform(e) {
20120
20142
  const t = __PRIVATE_createSentinelChildContext(this, e,
20121
- /*array=*/ !0), n = this.yc.map((e => __PRIVATE_parseData(e, t))), r = new __PRIVATE_ArrayUnionTransformOperation(n);
20143
+ /*array=*/ !0), n = this.Sc.map((e => __PRIVATE_parseData(e, t))), r = new __PRIVATE_ArrayUnionTransformOperation(n);
20122
20144
  return new FieldTransform(e.path, r);
20123
20145
  }
20124
20146
  isEqual(e) {
20125
- return e instanceof __PRIVATE_ArrayUnionFieldValueImpl && util.deepEqual(this.yc, e.yc);
20147
+ return e instanceof __PRIVATE_ArrayUnionFieldValueImpl && util.deepEqual(this.Sc, e.Sc);
20126
20148
  }
20127
20149
  }
20128
20150
 
20129
20151
  class __PRIVATE_ArrayRemoveFieldValueImpl extends FieldValue {
20130
20152
  constructor(e, t) {
20131
- super(e), this.yc = t;
20153
+ super(e), this.Sc = t;
20132
20154
  }
20133
20155
  _toFieldTransform(e) {
20134
20156
  const t = __PRIVATE_createSentinelChildContext(this, e,
20135
- /*array=*/ !0), n = this.yc.map((e => __PRIVATE_parseData(e, t))), r = new __PRIVATE_ArrayRemoveTransformOperation(n);
20157
+ /*array=*/ !0), n = this.Sc.map((e => __PRIVATE_parseData(e, t))), r = new __PRIVATE_ArrayRemoveTransformOperation(n);
20136
20158
  return new FieldTransform(e.path, r);
20137
20159
  }
20138
20160
  isEqual(e) {
20139
- return e instanceof __PRIVATE_ArrayRemoveFieldValueImpl && util.deepEqual(this.yc, e.yc);
20161
+ return e instanceof __PRIVATE_ArrayRemoveFieldValueImpl && util.deepEqual(this.Sc, e.Sc);
20140
20162
  }
20141
20163
  }
20142
20164
 
20143
20165
  class __PRIVATE_NumericIncrementFieldValueImpl extends FieldValue {
20144
20166
  constructor(e, t) {
20145
- super(e), this.wc = t;
20167
+ super(e), this.Dc = t;
20146
20168
  }
20147
20169
  _toFieldTransform(e) {
20148
- const t = new __PRIVATE_NumericIncrementTransformOperation(e.serializer, toNumber(e.serializer, this.wc));
20170
+ const t = new __PRIVATE_NumericIncrementTransformOperation(e.serializer, toNumber(e.serializer, this.Dc));
20149
20171
  return new FieldTransform(e.path, t);
20150
20172
  }
20151
20173
  isEqual(e) {
20152
- return e instanceof __PRIVATE_NumericIncrementFieldValueImpl && this.wc === e.wc;
20174
+ return e instanceof __PRIVATE_NumericIncrementFieldValueImpl && this.Dc === e.Dc;
20153
20175
  }
20154
20176
  }
20155
20177
 
20156
20178
  /** Parse update data from an update() call. */ function __PRIVATE_parseUpdateData(e, t, n, r) {
20157
- const i = e.gc(1 /* UserDataSource.Update */ , t, n);
20179
+ const i = e.bc(1 /* UserDataSource.Update */ , t, n);
20158
20180
  __PRIVATE_validatePlainObject("Data must be an object, but it was:", i, r);
20159
20181
  const s = [], o = ObjectValue.empty();
20160
20182
  forEach(r, ((e, r) => {
@@ -20162,7 +20184,7 @@ class __PRIVATE_NumericIncrementFieldValueImpl extends FieldValue {
20162
20184
  // For Compat types, we have to "extract" the underlying types before
20163
20185
  // performing validation.
20164
20186
  r = util.getModularInstance(r);
20165
- const a = i.Ac(_);
20187
+ const a = i.mc(_);
20166
20188
  if (r instanceof __PRIVATE_DeleteFieldValueImpl)
20167
20189
  // Add it to the field mask, but don't add anything to updateData.
20168
20190
  s.push(_); else {
@@ -20175,7 +20197,7 @@ class __PRIVATE_NumericIncrementFieldValueImpl extends FieldValue {
20175
20197
  }
20176
20198
 
20177
20199
  /** Parse update data from a list of field/value arguments. */ function __PRIVATE_parseUpdateVarargs(e, t, n, r, i, s) {
20178
- const o = e.gc(1 /* UserDataSource.Update */ , t, n), _ = [ __PRIVATE_fieldPathFromArgument$1(t, r, n) ], a = [ i ];
20200
+ const o = e.bc(1 /* UserDataSource.Update */ , t, n), _ = [ __PRIVATE_fieldPathFromArgument$1(t, r, n) ], a = [ i ];
20179
20201
  if (s.length % 2 != 0) throw new FirestoreError(N.INVALID_ARGUMENT, `Function ${t}() needs to be called with an even number of arguments that alternate between field names and values.`);
20180
20202
  for (let e = 0; e < s.length; e += 2) _.push(__PRIVATE_fieldPathFromArgument$1(t, s[e])),
20181
20203
  a.push(s[e + 1]);
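For context, __PRIVATE_parseUpdateData and __PRIVATE_parseUpdateVarargs above back the two public updateDoc() overloads: a single map of fields, or alternating field/value arguments, which is why the argument count must be even. A minimal TypeScript sketch, assuming an initialized default app and a hypothetical users/alice document:

import { getFirestore, doc, updateDoc, FieldPath } from "firebase/firestore";

const db = getFirestore();
const alice = doc(db, "users/alice"); // hypothetical path

// Map form (parseUpdateData): dotted strings address nested fields.
await updateDoc(alice, { "profile.city": "Wellington", visits: 3 });

// Varargs form (parseUpdateVarargs): arguments alternate between a field
// name (string or FieldPath) and its value, so they must come in pairs.
await updateDoc(alice, "profile.city", "Wellington", new FieldPath("profile", "zip"), "6011");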
@@ -20188,7 +20210,7 @@ class __PRIVATE_NumericIncrementFieldValueImpl extends FieldValue {
20188
20210
  // For Compat types, we have to "extract" the underlying types before
20189
20211
  // performing validation.
20190
20212
  n = util.getModularInstance(n);
20191
- const r = o.Ac(t);
20213
+ const r = o.mc(t);
20192
20214
  if (n instanceof __PRIVATE_DeleteFieldValueImpl)
20193
20215
  // Add it to the field mask, but don't add anything to updateData.
20194
20216
  u.push(t); else {
@@ -20207,7 +20229,7 @@ class __PRIVATE_NumericIncrementFieldValueImpl extends FieldValue {
20207
20229
  * @param allowArrays - Whether the query value is an array that may directly
20208
20230
  * contain additional arrays (e.g. the operand of an `in` query).
20209
20231
  */ function __PRIVATE_parseQueryValue(e, t, n, r = !1) {
20210
- return __PRIVATE_parseData(n, e.gc(r ? 4 /* UserDataSource.ArrayArgument */ : 3 /* UserDataSource.Argument */ , t));
20232
+ return __PRIVATE_parseData(n, e.bc(r ? 4 /* UserDataSource.ArrayArgument */ : 3 /* UserDataSource.Argument */ , t));
20211
20233
  }
20212
20234
 
20213
20235
  /**
@@ -20236,8 +20258,8 @@ class __PRIVATE_NumericIncrementFieldValueImpl extends FieldValue {
20236
20258
  */
20237
20259
  return function __PRIVATE_parseSentinelFieldValue(e, t) {
20238
20260
  // Sentinels are only supported with writes, and not within arrays.
20239
- if (!__PRIVATE_isWrite(t.hc)) throw t.Vc(`${e._methodName}() can only be used with update() and set()`);
20240
- if (!t.path) throw t.Vc(`${e._methodName}() is not currently supported inside arrays`);
20261
+ if (!__PRIVATE_isWrite(t.Ic)) throw t.gc(`${e._methodName}() can only be used with update() and set()`);
20262
+ if (!t.path) throw t.gc(`${e._methodName}() is not currently supported inside arrays`);
20241
20263
  const n = e._toFieldTransform(t);
20242
20264
  n && t.fieldTransforms.push(n);
20243
20265
  }
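For context, the ArrayUnion/ArrayRemove/NumericIncrement field values above are compiled into field transforms, and __PRIVATE_parseSentinelFieldValue only accepts such sentinels in update()/set() data and not inside arrays. A minimal TypeScript sketch of the public helpers, assuming an initialized default app and a hypothetical posts/first document:

import {
  getFirestore, doc, updateDoc,
  arrayUnion, arrayRemove, increment, serverTimestamp,
} from "firebase/firestore";

const db = getFirestore();
const post = doc(db, "posts/first"); // hypothetical path

// Each sentinel becomes a FieldTransform on its field path.
await updateDoc(post, {
  tags: arrayUnion("firestore"),     // ArrayUnionTransformOperation
  drafts: arrayRemove("old-draft"),  // ArrayRemoveTransformOperation
  likes: increment(1),               // NumericIncrementTransformOperation
  updatedAt: serverTimestamp(),
});

// Not allowed: nesting a sentinel inside an array literal, or using one
// outside a write — parsing throws
// "...() can only be used with update() and set()".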
@@ -20261,12 +20283,12 @@ class __PRIVATE_NumericIncrementFieldValueImpl extends FieldValue {
20261
20283
  // the set of values to be included for the IN query) that may directly
20262
20284
  // contain additional arrays (each representing an individual field
20263
20285
  // value), so we disable this validation.
20264
- if (t.settings.Ec && 4 /* UserDataSource.ArrayArgument */ !== t.hc) throw t.Vc("Nested arrays are not supported");
20286
+ if (t.settings.Rc && 4 /* UserDataSource.ArrayArgument */ !== t.Ic) throw t.gc("Nested arrays are not supported");
20265
20287
  return function __PRIVATE_parseArray(e, t) {
20266
20288
  const n = [];
20267
20289
  let r = 0;
20268
20290
  for (const i of e) {
20269
- let e = __PRIVATE_parseData(i, t.Rc(r));
20291
+ let e = __PRIVATE_parseData(i, t.fc(r));
20270
20292
  null == e && (
20271
20293
  // Just include nulls in the array for fields being replaced with a
20272
20294
  // sentinel.
@@ -20318,7 +20340,7 @@ class __PRIVATE_NumericIncrementFieldValueImpl extends FieldValue {
20318
20340
  };
20319
20341
  if (e instanceof DocumentReference) {
20320
20342
  const n = t.databaseId, r = e.firestore._databaseId;
20321
- if (!r.isEqual(n)) throw t.Vc(`Document reference is for database ${r.projectId}/${r.database} but should be for database ${n.projectId}/${n.database}`);
20343
+ if (!r.isEqual(n)) throw t.gc(`Document reference is for database ${r.projectId}/${r.database} but should be for database ${n.projectId}/${n.database}`);
20322
20344
  return {
20323
20345
  referenceValue: __PRIVATE_toResourceName(e.firestore._databaseId || t.databaseId, e._key.path)
20324
20346
  };
@@ -20336,7 +20358,7 @@ class __PRIVATE_NumericIncrementFieldValueImpl extends FieldValue {
20336
20358
  [Tt]: {
20337
20359
  arrayValue: {
20338
20360
  values: e.toArray().map((e => {
20339
- if ("number" != typeof e) throw t.Vc("VectorValues must only contain numeric values.");
20361
+ if ("number" != typeof e) throw t.gc("VectorValues must only contain numeric values.");
20340
20362
  return __PRIVATE_toDouble(t.serializer, e);
20341
20363
  }))
20342
20364
  }
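For context, the branch above serializes vector fields and rejects non-numeric entries. A minimal TypeScript sketch, assuming the vector() helper exported by recent modular SDK versions, an initialized default app, and a hypothetical embeddings/chunk-1 document:

import { getFirestore, doc, setDoc, vector } from "firebase/firestore";

const db = getFirestore();
const chunk = doc(db, "embeddings/chunk-1"); // hypothetical path

// Every entry must be a number; anything else fails with
// "VectorValues must only contain numeric values."
await setDoc(chunk, {
  text: "hello world",
  embedding: vector([0.12, -0.34, 0.56]),
});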
@@ -20354,7 +20376,7 @@ class __PRIVATE_NumericIncrementFieldValueImpl extends FieldValue {
20354
20376
  * GeoPoints, etc. are not considered to look like JSON objects since they map
20355
20377
  * to specific FieldValue types other than ObjectValue.
20356
20378
  */ (e, t);
20357
- throw t.Vc(`Unsupported field value: ${__PRIVATE_valueDescription(e)}`);
20379
+ throw t.gc(`Unsupported field value: ${__PRIVATE_valueDescription(e)}`);
20358
20380
  }(e, t);
20359
20381
  }
20360
20382
 
@@ -20364,7 +20386,7 @@ function __PRIVATE_parseObject(e, t) {
20364
20386
  // If we encounter an empty object, we explicitly add it to the update
20365
20387
  // mask to ensure that the server creates a map entry.
20366
20388
  t.path && t.path.length > 0 && t.fieldMask.push(t.path) : forEach(e, ((e, r) => {
20367
- const i = __PRIVATE_parseData(r, t.Ic(e));
20389
+ const i = __PRIVATE_parseData(r, t.Ac(e));
20368
20390
  null != i && (n[e] = i);
20369
20391
  })), {
20370
20392
  mapValue: {
@@ -20382,7 +20404,7 @@ function __PRIVATE_validatePlainObject(e, t, n) {
20382
20404
  return "object" == typeof e && null !== e && (Object.getPrototypeOf(e) === Object.prototype || null === Object.getPrototypeOf(e));
20383
20405
  }(n)) {
20384
20406
  const r = __PRIVATE_valueDescription(n);
20385
- throw "an object" === r ? t.Vc(e + " a custom object") : t.Vc(e + " " + r);
20407
+ throw "an object" === r ? t.gc(e + " a custom object") : t.gc(e + " " + r);
20386
20408
  }
20387
20409
  }
20388
20410
 
@@ -21687,7 +21709,7 @@ function onSnapshot(e, ...t) {
21687
21709
  const i = new __PRIVATE_AsyncObserver(r), s = new __PRIVATE_QueryListener(t, i, n);
21688
21710
  return e.asyncQueue.enqueueAndForget((async () => __PRIVATE_eventManagerListen(await __PRIVATE_getEventManager(e), s))),
21689
21711
  () => {
21690
- i.Cu(), e.asyncQueue.enqueueAndForget((async () => __PRIVATE_eventManagerUnlisten(await __PRIVATE_getEventManager(e), s)));
21712
+ i.xu(), e.asyncQueue.enqueueAndForget((async () => __PRIVATE_eventManagerUnlisten(await __PRIVATE_getEventManager(e), s)));
21691
21713
  };
21692
21714
  }(ensureFirestoreConfigured(u), c, _, a);
21693
21715
  }
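For context, the hunk above wires the teardown path of the public onSnapshot() API: the returned closure mutes the async observer and unregisters the query listener on the async queue. A minimal TypeScript sketch, assuming an initialized default app and a hypothetical users/alice document:

import { getFirestore, doc, onSnapshot } from "firebase/firestore";

const db = getFirestore();
const alice = doc(db, "users/alice"); // hypothetical path

// onSnapshot returns the unsubscribe closure built in the hunk above.
const unsubscribe = onSnapshot(alice, (snapshot) => {
  console.log("current data:", snapshot.data());
});

// Later: stops callbacks and detaches the listener.
unsubscribe();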
@@ -22196,7 +22218,7 @@ function __PRIVATE_validateReference(e, t) {
22196
22218
  const r = new __PRIVATE_Deferred;
22197
22219
  return e.asyncQueue.enqueueAndForget((async () => {
22198
22220
  const i = await __PRIVATE_getDatastore(e);
22199
- new __PRIVATE_TransactionRunner(e.asyncQueue, i, n, t, r).$u();
22221
+ new __PRIVATE_TransactionRunner(e.asyncQueue, i, n, t, r).Wu();
22200
22222
  })), r.promise;
22201
22223
  }(ensureFirestoreConfigured(e), (n => t(new Transaction(e, n))), r);
22202
22224
  }
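For context, the hunk above shows runTransaction() handing the user callback to a TransactionRunner on the async queue. A minimal TypeScript sketch, assuming an initialized default app and a hypothetical stats/counter document:

import { getFirestore, doc, runTransaction } from "firebase/firestore";

const db = getFirestore();
const counter = doc(db, "stats/counter"); // hypothetical path

// The callback may be retried; all reads must go through the transaction.
const next = await runTransaction(db, async (tx) => {
  const snap = await tx.get(counter);
  const value = (snap.data()?.count ?? 0) + 1;
  tx.set(counter, { count: value }, { merge: true });
  return value;
});
console.log("new count:", next);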
@@ -22563,7 +22585,7 @@ function _internalQueryToProtoQueryTarget(e) {
22563
22585
  * The implementation of `TestingHooksSpi`.
22564
22586
  */ class __PRIVATE_TestingHooksSpiImpl {
22565
22587
  constructor() {
22566
- this.Sc = new Map;
22588
+ this.vc = new Map;
22567
22589
  }
22568
22590
  static get instance() {
22569
22591
  return Tn || (Tn = new __PRIVATE_TestingHooksSpiImpl, function __PRIVATE_setTestingHooksSpi(e) {
@@ -22572,10 +22594,10 @@ function _internalQueryToProtoQueryTarget(e) {
22572
22594
  }(Tn)), Tn;
22573
22595
  }
22574
22596
  ht(e) {
22575
- this.Sc.forEach((t => t(e)));
22597
+ this.vc.forEach((t => t(e)));
22576
22598
  }
22577
22599
  onExistenceFilterMismatch(e) {
22578
- const t = Symbol(), n = this.Sc;
22600
+ const t = Symbol(), n = this.vc;
22579
22601
  return n.set(t, e), () => n.delete(t);
22580
22602
  }
22581
22603
  }
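For context, onExistenceFilterMismatch above stores each callback in a Map under a unique Symbol key and returns a closure that deletes it. An illustrative TypeScript sketch of that registry pattern, with generic names rather than the SDK's internal types:

// Callback registry mirroring the pattern above: register() stores the
// callback under a fresh Symbol and returns an unregister closure.
type Listener<T> = (event: T) => void;

class CallbackRegistry<T> {
  private readonly callbacks = new Map<symbol, Listener<T>>();

  register(callback: Listener<T>): () => void {
    const key = Symbol();
    this.callbacks.set(key, callback);
    return () => this.callbacks.delete(key);
  }

  // Mirrors ht(e) above: invoke every registered callback with the event.
  notify(event: T): void {
    this.callbacks.forEach((callback) => callback(event));
  }
}

// Usage: const off = registry.register((info) => console.log(info)); off();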