@firebase/firestore 4.7.16 → 4.7.17
This diff reflects the changes between publicly released package versions as they appear in their public registry, and is provided for informational purposes only.
- package/dist/firestore/src/platform/browser/webchannel_connection.d.ts +17 -0
- package/dist/index.cjs.js +640 -618
- package/dist/index.cjs.js.map +1 -1
- package/dist/index.esm2017.js +640 -618
- package/dist/index.esm2017.js.map +1 -1
- package/dist/index.node.cjs.js +2 -2
- package/dist/index.node.mjs +2 -2
- package/dist/index.rn.js +596 -574
- package/dist/index.rn.js.map +1 -1
- package/dist/lite/firestore/src/platform/browser/webchannel_connection.d.ts +17 -0
- package/dist/lite/index.browser.esm2017.js +2 -2
- package/dist/lite/index.cjs.js +2 -2
- package/dist/lite/index.node.cjs.js +2 -2
- package/dist/lite/index.node.mjs +2 -2
- package/dist/lite/index.rn.esm2017.js +2 -2
- package/package.json +3 -3
package/dist/index.rn.js (CHANGED)
@@ -10,7 +10,7 @@ import { Integer as p, Md5 as y } from "@firebase/webchannel-wrapper/bloom-blob"
 
 import { XhrIo as w, EventType as S, ErrorCode as b, createWebChannelTransport as D, getStatEventTarget as v, WebChannel as C, Event as F, Stat as M } from "@firebase/webchannel-wrapper/webchannel-blob";
 
-const x = "@firebase/firestore", O = "4.7.16";
+const x = "@firebase/firestore", O = "4.7.17";
 
 /**
  * @license
@@ -72,7 +72,7 @@ User.MOCK_USER = new User("mock-user");
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-let N = "11.
+let N = "11.9.0";
 
 /**
  * @license
@@ -14219,7 +14219,9 @@ class __PRIVATE_RestConnection {
 
 class __PRIVATE_WebChannelConnection extends __PRIVATE_RestConnection {
     constructor(e) {
-        super(e),
+        super(e),
+        /** A collection of open WebChannel instances */
+        this.l_ = [], this.forceLongPolling = e.forceLongPolling, this.autoDetectLongPolling = e.autoDetectLongPolling,
         this.useFetchStreams = e.useFetchStreams, this.longPollingOptions = e.longPollingOptions;
     }
     Jo(e, t, n, r, i) {
@@ -14261,10 +14263,10 @@ class __PRIVATE_WebChannelConnection extends __PRIVATE_RestConnection {
 
               default:
                 fail(9055, {
-
+                    h_: e,
                     streamId: s,
-
-
+                    P_: _.getLastErrorCode(),
+                    T_: _.getLastError()
                 });
             }
         } finally {
@@ -14275,7 +14277,7 @@ class __PRIVATE_WebChannelConnection extends __PRIVATE_RestConnection {
            __PRIVATE_logDebug(en, `RPC '${e}' ${s} sending request:`, r), _.send(t, "POST", a, n, 15);
        }));
    }
-
+    I_(e, t, n) {
        const r = __PRIVATE_generateUniqueDebugId(), i = [ this.Ko, "/", "google.firestore.v1.Firestore", "/", e, "/channel" ], s = D(), o = v(), _ = {
            // Required for backend stickiness, routing behavior is based on this
            // parameter.
@@ -14315,12 +14317,13 @@ class __PRIVATE_WebChannelConnection extends __PRIVATE_RestConnection {
        const u = i.join("");
        __PRIVATE_logDebug(en, `Creating RPC '${e}' stream ${r}: ${u}`, _);
        const c = s.createWebChannel(u, _);
+        this.E_(c);
        // WebChannel supports sending the first message with the handshake - saving
        // a network round trip. However, it will have to call send in the same
        // JS event loop as open. In order to enforce this, we delay actually
        // opening the WebChannel until send is called. Whether we have called
        // open is tracked with this variable.
-
+        let l = !1, h = !1;
        // A flag to determine whether the stream was closed (by us or through an
        // error/close event) to avoid delivering multiple close events or sending
        // on a closed stream
@@ -14352,7 +14355,7 @@ class __PRIVATE_WebChannelConnection extends __PRIVATE_RestConnection {
            h || (__PRIVATE_logDebug(en, `RPC '${e}' stream ${r} transport opened.`), P.__());
        })), __PRIVATE_unguardedEventListen(c, C.EventType.CLOSE, (() => {
            h || (h = !0, __PRIVATE_logDebug(en, `RPC '${e}' stream ${r} transport closed`),
-            P.u_());
+            P.u_(), this.d_(c));
        })), __PRIVATE_unguardedEventListen(c, C.EventType.ERROR, (t => {
            h || (h = !0, __PRIVATE_logWarn(en, `RPC '${e}' stream ${r} transport errored. Name:`, t.name, "Message:", t.message),
            P.u_(new FirestoreError(L.UNAVAILABLE, "The operation could not be completed")));
@@ -14399,6 +14402,25 @@ class __PRIVATE_WebChannelConnection extends __PRIVATE_RestConnection {
            P.a_();
        }), 0), P;
    }
+    /**
+     * Closes and cleans up any resources associated with the connection.
+     */ terminate() {
+        // If the Firestore instance is terminated, we will explicitly
+        // close any remaining open WebChannel instances.
+        this.l_.forEach((e => e.close())), this.l_ = [];
+    }
+    /**
+     * Add a WebChannel instance to the collection of open instances.
+     * @param webChannel
+     */ E_(e) {
+        this.l_.push(e);
+    }
+    /**
+     * Remove a WebChannel instance from the collection of open instances.
+     * @param webChannel
+     */ d_(e) {
+        this.l_ = this.l_.filter((t => t === e));
+    }
 }
 
 /**
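The hunks above are the substantive change in 4.7.17: `__PRIVATE_WebChannelConnection` now keeps a list of open WebChannel instances (`l_`), registers each channel right after `createWebChannel` (`E_`), drops it again from the CLOSE handler (`d_`), and gains a `terminate()` that closes anything still open when the Firestore client shuts down. Below is a hedged, de-minified sketch of that pattern; `WebChannelLike`, `openWebChannels`, `addOpenWebChannel` and `removeOpenWebChannel` are illustrative stand-ins, not the SDK's identifiers. Note that the shipped minified `d_` filters with `t === e`; the sketch follows the intent stated in its doc comment and filters the closed channel out.

```typescript
// De-minified sketch with assumed names; WebChannelLike only needs close(),
// which is all terminate() relies on.
interface WebChannelLike {
  close(): void;
}

class TrackedChannelConnection {
  /** Channels created for streaming RPCs that have not reported CLOSE yet. */
  private openWebChannels: WebChannelLike[] = [];

  /** Register a channel right after it is created (E_ in the minified diff). */
  protected addOpenWebChannel(channel: WebChannelLike): void {
    this.openWebChannels.push(channel);
  }

  /** Forget a channel once its transport closes (d_ in the minified diff). */
  protected removeOpenWebChannel(channel: WebChannelLike): void {
    this.openWebChannels = this.openWebChannels.filter(c => c !== channel);
  }

  /** Explicitly close anything still open when the Firestore client shuts down. */
  terminate(): void {
    this.openWebChannels.forEach(channel => channel.close());
    this.openWebChannels = [];
  }
}
```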
@@ -14516,10 +14538,10 @@ class __PRIVATE_ExponentialBackoff {
     * Note that jitter will still be applied, so the actual delay could be as
     * much as 1.5*maxDelayMs.
     */ , i = 6e4) {
-        this.xi = e, this.timerId = t, this.
-        this.
+        this.xi = e, this.timerId = t, this.A_ = n, this.R_ = r, this.V_ = i, this.m_ = 0,
+        this.f_ = null,
     /** The last backoff attempt, as epoch milliseconds. */
-        this.
+        this.g_ = Date.now(), this.reset();
@@ -14528,40 +14550,40 @@ class __PRIVATE_ExponentialBackoff {
-        this.
+        this.m_ = 0;
-     */
-        this.
+     */ p_() {
+        this.m_ = this.V_;
-     */
+     */ y_(e) {
-        const t = Math.floor(this.
+        const t = Math.floor(this.m_ + this.w_()), n = Math.max(0, Date.now() - this.g_), r = Math.max(0, t - n);
-        r > 0 && __PRIVATE_logDebug("ExponentialBackoff", `Backing off for ${r} ms (base delay: ${this.
-        this.
+        r > 0 && __PRIVATE_logDebug("ExponentialBackoff", `Backing off for ${r} ms (base delay: ${this.m_} ms, delay with jitter: ${t} ms, last attempt: ${n} ms ago)`),
+        this.f_ = this.xi.enqueueAfterDelay(this.timerId, r, (() => (this.g_ = Date.now(),
-        this.
+        this.m_ *= this.R_, this.m_ < this.A_ && (this.m_ = this.A_), this.m_ > this.V_ && (this.m_ = this.V_);
-
-        null !== this.
+    S_() {
+        null !== this.f_ && (this.f_.skipDelay(), this.f_ = null);
-        null !== this.
+        null !== this.f_ && (this.f_.cancel(), this.f_ = null);
-    /** Returns a random value in the range [-currentBaseMs/2, currentBaseMs/2] */
-        return (Math.random() - .5) * this.
+    /** Returns a random value in the range [-currentBaseMs/2, currentBaseMs/2] */ w_() {
+        return (Math.random() - .5) * this.m_;
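The `__PRIVATE_ExponentialBackoff` hunks above appear to be identifier churn only (the new connection members shifted the minifier's name allocation: the backoff base is now `m_`, the pending delayed operation `f_`, the cap `V_`), but the retained lines show the whole policy: jitter in `[-base/2, +base/2]` via `w_`, the time already elapsed since the last attempt subtracted from the delay, and the base multiplied by the factor and clamped between the initial and maximum delay. Here is a rough TypeScript sketch of that policy; `BackoffPolicy` and `nextDelayMs` are assumed names, and it simply returns the delay instead of scheduling on the SDK's AsyncQueue as the real class does.

```typescript
// Assumed names (BackoffPolicy, nextDelayMs); simplified: the last-attempt
// timestamp is updated when the delay is computed, not when the delayed op runs.
class BackoffPolicy {
  private currentBaseMs = 0;               // m_ in the minified diff
  private lastAttemptTimeMs = Date.now();  // g_

  constructor(
    private readonly initialDelayMs: number,     // A_ (lower clamp)
    private readonly backoffFactor: number,      // R_
    private readonly maxDelayMs: number = 60000  // V_ (the 6e4 default visible above)
  ) {}

  reset(): void {                // after a healthy response
    this.currentBaseMs = 0;
  }

  resetToMax(): void {           // p_: e.g. after RESOURCE_EXHAUSTED
    this.currentBaseMs = this.maxDelayMs;
  }

  /** Delay before the next attempt: base plus jitter in [-base/2, +base/2],
   *  minus whatever time has already passed since the last attempt. */
  nextDelayMs(): number {
    const jitter = (Math.random() - 0.5) * this.currentBaseMs;  // w_
    const desired = Math.floor(this.currentBaseMs + jitter);
    const elapsed = Math.max(0, Date.now() - this.lastAttemptTimeMs);
    const remaining = Math.max(0, desired - elapsed);

    // Grow the base for the following attempt and clamp it to the bounds.
    this.currentBaseMs *= this.backoffFactor;
    if (this.currentBaseMs < this.initialDelayMs) this.currentBaseMs = this.initialDelayMs;
    if (this.currentBaseMs > this.maxDelayMs) this.currentBaseMs = this.maxDelayMs;

    this.lastAttemptTimeMs = Date.now();
    return remaining;
  }
}
```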
@@ -14617,18 +14639,18 @@ class __PRIVATE_ExponentialBackoff {
-        this.xi = e, this.
+        this.xi = e, this.b_ = n, this.D_ = r, this.connection = i, this.authCredentialsProvider = s,
     /**
      * A close count that's incremented every time the stream is closed; used by
      * getCloseGuardedDispatcher() to invalidate callbacks that happen after
      * close.
      */
-        this.
+        this.v_ = 0, this.C_ = null, this.F_ = null, this.stream = null,
-        this.
+        this.M_ = 0, this.x_ = new __PRIVATE_ExponentialBackoff(e, t);
@@ -14636,13 +14658,13 @@ class __PRIVATE_PersistentStream {
-     */
-        return 1 /* PersistentStreamState.Starting */ === this.state || 5 /* PersistentStreamState.Backoff */ === this.state || this.
+     */ O_() {
+        return 1 /* PersistentStreamState.Starting */ === this.state || 5 /* PersistentStreamState.Backoff */ === this.state || this.N_();
-     */
+     */ N_() {
@@ -14652,7 +14674,7 @@ class __PRIVATE_PersistentStream {
-        this.
+        this.M_ = 0, 4 /* PersistentStreamState.Error */ !== this.state ? this.auth() : this.B_();
@@ -14660,7 +14682,7 @@ class __PRIVATE_PersistentStream {
-        this.
+        this.O_() && await this.close(0 /* PersistentStreamState.Initial */);
@@ -14669,8 +14691,8 @@ class __PRIVATE_PersistentStream {
-     */
-        this.state = 0 /* PersistentStreamState.Initial */ , this.
+     */ L_() {
+        this.state = 0 /* PersistentStreamState.Initial */ , this.x_.reset();
@@ -14681,25 +14703,25 @@ class __PRIVATE_PersistentStream {
-     */
+     */ k_() {
-        this.
+        this.N_() && null === this.C_ && (this.C_ = this.xi.enqueueAfterDelay(this.b_, 6e4, (() => this.q_())));
-    /** Sends a message to the underlying stream. */
-        this.
+    /** Sends a message to the underlying stream. */ Q_(e) {
+        this.U_(), this.stream.send(e);
-    /** Called by the idle timer when the stream should close due to inactivity. */ async
-        if (this.
+    /** Called by the idle timer when the stream should close due to inactivity. */ async q_() {
+        if (this.N_())
-    /** Marks the stream as active again. */
-        this.
+    /** Marks the stream as active again. */ U_() {
+        this.C_ && (this.C_.cancel(), this.C_ = null);
-    /** Cancels the health check delayed operation. */
-        this.
+    /** Cancels the health check delayed operation. */ K_() {
+        this.F_ && (this.F_.cancel(), this.F_ = null);
@@ -14715,15 +14737,15 @@ class __PRIVATE_PersistentStream {
-        this.
+        this.U_(), this.K_(), this.x_.cancel(),
-        this.
+        this.v_++, 4 /* PersistentStreamState.Error */ !== e ?
-        this.
+        this.x_.reset() : t && t.code === L.RESOURCE_EXHAUSTED ? (
-        this.
+        this.x_.p_()) : t && t.code === L.UNAUTHENTICATED && 3 /* PersistentStreamState.Healthy */ !== this.state && (
@@ -14732,7 +14754,7 @@ class __PRIVATE_PersistentStream {
-        null !== this.stream && (this.
+        null !== this.stream && (this.W_(), this.stream.close(), this.stream = null),
@@ -14742,48 +14764,48 @@ class __PRIVATE_PersistentStream {
-     */
+     */ W_() {}
-        const e = this.
+        const e = this.G_(this.v_), t = this.v_;
-            this.
+            this.v_ === t &&
-            this.
+            this.z_(e, n);
-                return this.
+                return this.j_(e);
-
-        const n = this.
-        this.stream = this.
+    z_(e, t) {
+        const n = this.G_(this.v_);
+        this.stream = this.H_(e, t), this.stream.e_((() => {
-            n((() => (this.state = 2 /* PersistentStreamState.Open */ , this.
+            n((() => (this.state = 2 /* PersistentStreamState.Open */ , this.F_ = this.xi.enqueueAfterDelay(this.D_, 1e4, (() => (this.N_() && (this.state = 3 /* PersistentStreamState.Healthy */),
-            n((() => this.
+            n((() => this.j_(e)));
-            n((() => 1 == ++this.
+            n((() => 1 == ++this.M_ ? this.J_(e) : this.onNext(e)));
-
-        this.state = 5 /* PersistentStreamState.Backoff */ , this.
+    B_() {
+        this.state = 5 /* PersistentStreamState.Backoff */ , this.x_.y_((async () => {
-
+    j_(e) {
@@ -14795,9 +14817,9 @@ class __PRIVATE_PersistentStream {
     * AsyncQueue but only runs them if closeCount remains unchanged. This allows
     * us to turn auth / stream callbacks into no-ops if the stream is closed /
     * re-opened, etc.
-     */
+     */ G_(e) {
-            this.xi.enqueueAndForget((() => this.
+            this.xi.enqueueAndForget((() => this.v_ === e ? t() : (__PRIVATE_logDebug(tn, "stream callback skipped by getCloseGuardedDispatcher."),
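The `__PRIVATE_PersistentStream` hunks above likewise look like rename churn (`v_` is the close count, `x_` the backoff helper, `G_` the guarded dispatcher), but the retained comment and the `"stream callback skipped by getCloseGuardedDispatcher."` debug line spell out the idea worth calling out: every callback is routed through a dispatcher that captures the close count at creation time and silently drops work once the stream has been closed or restarted. A minimal sketch of that guard follows; the `enqueue` parameter is an assumed stand-in for the SDK's AsyncQueue.

```typescript
// Assumed shape: `enqueue` stands in for the SDK's AsyncQueue.enqueueAndForget.
class CloseGuardedStream {
  private closeCount = 0;   // v_ in the minified diff

  constructor(private readonly enqueue: (op: () => Promise<void>) => void) {}

  /** Invalidate every dispatcher created before this point. */
  close(): void {
    this.closeCount++;
  }

  /** Wrap auth and transport callbacks so that work created before a close()
   *  (or before a restart) becomes a no-op instead of touching a dead stream. */
  getCloseGuardedDispatcher(startCloseCount: number): (fn: () => Promise<void>) => void {
    return fn => {
      this.enqueue(() =>
        this.closeCount === startCloseCount ? fn() : Promise.resolve()
      );
    };
  }
}
```

Each (re)start captures the current close count and builds a dispatcher from it, which is what the `this.v_ === e ? t() : ...` line in the hunk above does.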
@@ -14814,15 +14836,15 @@ class __PRIVATE_PersistentStream {
-
-        return this.connection.
+    H_(e, t) {
+        return this.connection.I_("Listen", e, t);
-
+    J_(e) {
-        this.
+        this.x_.reset();
@@ -14831,14 +14853,14 @@ class __PRIVATE_PersistentStream {
-        return this.listener.
+        return this.listener.Y_(t, n);
-     */
+     */ Z_(e) {
@@ -14862,15 +14884,15 @@ class __PRIVATE_PersistentStream {
-        n && (t.labels = n), this.
+        n && (t.labels = n), this.Q_(t);
-     */
+     */ X_(e) {
-        this.
+        this.Q_(t);
@@ -14898,24 +14920,24 @@ class __PRIVATE_PersistentStream {
-     */ get
-        return this.
+     */ get ea() {
+        return this.M_ > 0;
-
-        this.
+    W_() {
+        this.ea && this.ta([]);
-
-        return this.connection.
+    H_(e, t) {
+        return this.connection.I_("Write", e, t);
-
+    J_(e) {
-        __PRIVATE_hardAssert(!e.writeResults || 0 === e.writeResults.length, 55816), this.listener.
+        __PRIVATE_hardAssert(!e.writeResults || 0 === e.writeResults.length, 55816), this.listener.na();
@@ -14923,26 +14945,26 @@ class __PRIVATE_PersistentStream {
-        this.
+        this.x_.reset();
-        return this.listener.
+        return this.listener.ra(n, t);
-     */
+     */ ia() {
-        e.database = __PRIVATE_getEncodedDatabaseId(this.serializer), this.
+        e.database = __PRIVATE_getEncodedDatabaseId(this.serializer), this.Q_(e);
-    /** Sends a group of mutations to the Firestore backend to apply. */
+    /** Sends a group of mutations to the Firestore backend to apply. */ ta(e) {
-        this.
+        this.Q_(t);
@@ -14974,25 +14996,25 @@ class __PRIVATE_PersistentStream {
-        this.serializer = r, this.
+        this.serializer = r, this.sa = !1;
-
-        if (this.
+    oa() {
+        if (this.sa) throw new FirestoreError(L.FAILED_PRECONDITION, "The client has already been terminated.");
-        return this.
+        return this.oa(), Promise.all([ this.authCredentials.getToken(), this.appCheckCredentials.getToken() ]).then((([i, s]) => this.connection.zo(e, __PRIVATE_toResourcePath(t, n), r, i, s))).catch((e => {
-        return this.
+        return this.oa(), Promise.all([ this.authCredentials.getToken(), this.appCheckCredentials.getToken() ]).then((([s, o]) => this.connection.Yo(e, __PRIVATE_toResourcePath(t, n), r, s, o, i))).catch((e => {
-        this.
+        this.sa = !0, this.connection.terminate();
@@ -15019,19 +15041,19 @@ class __PRIVATE_OnlineStateTracker {
     * maximum defined by MAX_WATCH_STREAM_FAILURES, we'll set the OnlineState to
     * Offline.
     */
-        this.
+        this._a = 0,
-        this.
+        this.aa = null,
-        this.
+        this.ua = !0;
@@ -15039,9 +15061,9 @@ class __PRIVATE_OnlineStateTracker {
-     */
-        0 === this.
-        this.
+     */ ca() {
+        0 === this._a && (this.la("Unknown" /* OnlineState.Unknown */), this.aa = this.asyncQueue.enqueueAfterDelay("online_state_timeout" /* TimerId.OnlineStateTimeout */ , 1e4, (() => (this.aa = null,
+        this.ha("Backend didn't respond within 10 seconds."), this.la("Offline" /* OnlineState.Offline */),
@@ -15049,10 +15071,10 @@ class __PRIVATE_OnlineStateTracker {
-     */
-        "Online" /* OnlineState.Online */ === this.state ? this.
-        this.
-        this.
+     */ Pa(e) {
+        "Online" /* OnlineState.Online */ === this.state ? this.la("Unknown" /* OnlineState.Unknown */) : (this._a++,
+        this._a >= 1 && (this.Ta(), this.ha(`Connection failed 1 times. Most recent error: ${e.toString()}`),
+        this.la("Offline" /* OnlineState.Offline */)));
@@ -15061,20 +15083,20 @@ class __PRIVATE_OnlineStateTracker {
-        this.
+        this.Ta(), this._a = 0, "Online" /* OnlineState.Online */ === e && (
-        this.
+        this.ua = !1), this.la(e);
-
+    la(e) {
-
+    ha(e) {
-        this.
+        this.ua ? (__PRIVATE_logError(t), this.ua = !1) : __PRIVATE_logDebug("OnlineStateTracker", t);
-
-        null !== this.
+    Ta() {
+        null !== this.aa && (this.aa.cancel(), this.aa = null);
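The `__PRIVATE_OnlineStateTracker` hunks are again renames (`_a` failure count, `aa` timeout, `ua` warn-once flag, `la`/`ha` setters), but the retained strings describe the behaviour: a 10-second `online_state_timeout` before forcing Offline, a failure threshold that the shipped code compares against 1, and a "could not reach backend" warning that is logged only once. A hedged sketch of that bookkeeping is below; `handleWatchStreamFailure`, `maxFailures` and the other names are assumptions, and the 10-second timer is omitted.

```typescript
// Assumed, de-minified names; the real tracker also arms a 10-second
// "online_state_timeout" on the AsyncQueue before forcing Offline.
type OnlineState = "Unknown" | "Online" | "Offline";

class OnlineStateTrackerSketch {
  private state: OnlineState = "Unknown";
  private failureCount = 0;            // _a in the minified diff
  private shouldWarn = true;           // ua: log the offline warning only once

  constructor(
    private readonly onChange: (s: OnlineState) => void,
    private readonly maxFailures = 1   // the shipped code compares _a against 1
  ) {}

  /** Each watch-stream failure counts toward flipping the client to Offline. */
  handleWatchStreamFailure(error: Error): void {
    if (this.state === "Online") {
      this.setAndBroadcast("Unknown");
      return;
    }
    this.failureCount++;
    if (this.failureCount >= this.maxFailures) {
      if (this.shouldWarn) {
        console.warn(`Could not reach Cloud Firestore backend. ${error}`);
        this.shouldWarn = false;
      }
      this.setAndBroadcast("Offline");
    }
  }

  /** An explicit set (e.g. after a successful stream) clears the failure count. */
  set(state: OnlineState): void {
    this.failureCount = 0;
    if (state === "Online") this.shouldWarn = false;
    this.setAndBroadcast(state);
  }

  private setAndBroadcast(state: OnlineState): void {
    if (state !== this.state) {
      this.state = state;
      this.onChange(state);
    }
  }
}
```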
@@ -15122,7 +15144,7 @@ class __PRIVATE_RemoteStoreImpl {
-        this.
+        this.Ia = [],
@@ -15132,12 +15154,12 @@ class __PRIVATE_RemoteStoreImpl {
-        this.
+        this.Ea = new Map,
-        this.
+        this.da = new Set,
@@ -15145,7 +15167,7 @@ class __PRIVATE_RemoteStoreImpl {
-        this.
+        this.Aa = [], this.Ra = i, this.Ra.No((e => {
@@ -15153,24 +15175,24 @@ class __PRIVATE_RemoteStoreImpl {
-            t.
-            t.
+            t.da.add(4 /* OfflineCause.ConnectivityChange */), await __PRIVATE_disableNetworkInternal(t),
+            t.Va.set("Unknown" /* OnlineState.Unknown */), t.da.delete(4 /* OfflineCause.ConnectivityChange */),
-        })), this.
+        })), this.Va = new __PRIVATE_OnlineStateTracker(n, r);
-    if (__PRIVATE_canUseNetwork(e)) for (const t of e.
+    if (__PRIVATE_canUseNetwork(e)) for (const t of e.Aa) await t(/* enabled= */ !0);
-    for (const t of e.
+    for (const t of e.Aa) await t(/* enabled= */ !1);
@@ -15179,11 +15201,11 @@ async function __PRIVATE_enableNetworkInternal(e) {
-    n.
+    n.Ea.has(t.targetId) || (
-    n.
+    n.Ea.set(t.targetId, t), __PRIVATE_shouldStartWatchStream(n) ?
-    __PRIVATE_startWatchStream(n) : __PRIVATE_ensureWatchStream(n).
+    __PRIVATE_startWatchStream(n) : __PRIVATE_ensureWatchStream(n).N_() && __PRIVATE_sendWatchRequest(n, t));
@@ -15191,22 +15213,22 @@ function __PRIVATE_remoteStoreListen(e, t) {
-    n.
+    n.Ea.delete(t), r.N_() && __PRIVATE_sendUnwatchRequest(n, t), 0 === n.Ea.size && (r.N_() ? r.k_() : __PRIVATE_canUseNetwork(n) &&
-    n.
+    n.Va.set("Unknown" /* OnlineState.Unknown */));
-    if (e.
+    if (e.ma.Ke(t.targetId), t.resumeToken.approximateByteSize() > 0 || t.snapshotVersion.compareTo(SnapshotVersion.min()) > 0) {
-    __PRIVATE_ensureWatchStream(e).
+    __PRIVATE_ensureWatchStream(e).Z_(t);
@@ -15214,39 +15236,39 @@ function __PRIVATE_remoteStoreListen(e, t) {
-    e.
+    e.ma.Ke(t), __PRIVATE_ensureWatchStream(e).X_(t);
-    e.
+    e.ma = new __PRIVATE_WatchChangeAggregator({
-        Rt: t => e.
+        Rt: t => e.Ea.get(t) || null,
-    }), __PRIVATE_ensureWatchStream(e).start(), e.
+    }), __PRIVATE_ensureWatchStream(e).start(), e.Va.ca();
-    return __PRIVATE_canUseNetwork(e) && !__PRIVATE_ensureWatchStream(e).
+    return __PRIVATE_canUseNetwork(e) && !__PRIVATE_ensureWatchStream(e).O_() && e.Ea.size > 0;
-    return 0 === __PRIVATE_debugCast(e).
+    return 0 === __PRIVATE_debugCast(e).da.size;
-    e.
+    e.ma = void 0;
-    e.
+    e.Va.set("Online" /* OnlineState.Online */);
-    e.
+    e.Ea.forEach(((t, n) => {
@@ -15254,17 +15276,17 @@ async function __PRIVATE_onWatchStreamOpen(e) {
-    __PRIVATE_shouldStartWatchStream(e) ? (e.
+    __PRIVATE_shouldStartWatchStream(e) ? (e.Va.Pa(t), __PRIVATE_startWatchStream(e)) :
-    e.
+    e.Va.set("Unknown" /* OnlineState.Unknown */);
-    e.
+    e.Va.set("Online" /* OnlineState.Online */), t instanceof __PRIVATE_WatchTargetChange && 2 /* WatchTargetChangeState.Removed */ === t.state && t.cause)
@@ -15273,7 +15295,7 @@ async function __PRIVATE_onWatchStreamChange(e, t, n) {
-        e.
+        e.Ea.has(r) && (await e.remoteSyncer.rejectListen(r, n), e.Ea.delete(r), e.ma.removeTarget(r));
@@ -15286,7 +15308,7 @@ async function __PRIVATE_onWatchStreamChange(e, t, n) {
-    } else if (t instanceof __PRIVATE_DocumentWatchChange ? e.
+    } else if (t instanceof __PRIVATE_DocumentWatchChange ? e.ma.Xe(t) : t instanceof __PRIVATE_ExistenceFilterChange ? e.ma.ot(t) : e.ma.nt(t),
@@ -15298,26 +15320,26 @@ async function __PRIVATE_onWatchStreamChange(e, t, n) {
-    const n = e.
+    const n = e.ma.It(t);
-            const i = e.
+            const i = e.Ea.get(r);
-            i && e.
+            i && e.Ea.set(r, i.withResumeToken(n.resumeToken, t));
-        const r = e.
+        const r = e.Ea.get(t);
-        e.
+        e.Ea.set(t, r.withResumeToken(ByteString.EMPTY_BYTE_STRING, r.snapshotVersion)),
@@ -15344,9 +15366,9 @@ async function __PRIVATE_onWatchStreamChange(e, t, n) {
-    e.
+    e.da.add(1 /* OfflineCause.IndexedDbFailed */),
-    await __PRIVATE_disableNetworkInternal(e), e.
+    await __PRIVATE_disableNetworkInternal(e), e.Va.set("Offline" /* OnlineState.Offline */),
@@ -15354,7 +15376,7 @@ async function __PRIVATE_onWatchStreamChange(e, t, n) {
-        __PRIVATE_logDebug(nn, "Retrying IndexedDB access"), await n(), e.
+        __PRIVATE_logDebug(nn, "Retrying IndexedDB access"), await n(), e.da.delete(1 /* OfflineCause.IndexedDbFailed */),
@@ -15368,11 +15390,11 @@ async function __PRIVATE_onWatchStreamChange(e, t, n) {
-    let r = t.
+    let r = t.Ia.length > 0 ? t.Ia[t.Ia.length - 1].batchId : j;
-            0 === t.
+            0 === t.Ia.length && n.k_();
@@ -15386,20 +15408,20 @@ async function __PRIVATE_fillWritePipeline(e) {
-    return __PRIVATE_canUseNetwork(e) && e.
+    return __PRIVATE_canUseNetwork(e) && e.Ia.length < 10;
-    e.
+    e.Ia.push(t);
-    n.
+    n.N_() && n.ea && n.ta(t.mutations);
-    return __PRIVATE_canUseNetwork(e) && !__PRIVATE_ensureWriteStream(e).
+    return __PRIVATE_canUseNetwork(e) && !__PRIVATE_ensureWriteStream(e).O_() && e.Ia.length > 0;
@@ -15407,17 +15429,17 @@ function __PRIVATE_startWriteStream(e) {
-    __PRIVATE_ensureWriteStream(e).
+    __PRIVATE_ensureWriteStream(e).ia();
-    for (const n of e.
+    for (const n of e.Ia) t.ta(n.mutations);
-    const r = e.
+    const r = e.Ia.shift(), i = MutationBatchResult.from(r, t, n);
@@ -15427,7 +15449,7 @@ async function __PRIVATE_onMutationResult(e, t, n) {
-    t && __PRIVATE_ensureWriteStream(e).
+    t && __PRIVATE_ensureWriteStream(e).ea &&
@@ -15437,11 +15459,11 @@ async function __PRIVATE_onWriteStreamClose(e, t) {
-        const n = e.
+        const n = e.Ia.shift();
-        __PRIVATE_ensureWriteStream(e).
+        __PRIVATE_ensureWriteStream(e).L_(), await __PRIVATE_executeWithRecovery(e, (() => e.remoteSyncer.rejectFailedWrite(n.batchId, t))),
@@ -15459,19 +15481,19 @@ async function __PRIVATE_remoteStoreHandleCredentialChange(e, t) {
-    n.
+    n.da.add(3 /* OfflineCause.CredentialChange */), await __PRIVATE_disableNetworkInternal(n),
-    n.
-    n.
+    n.Va.set("Unknown" /* OnlineState.Unknown */), await n.remoteSyncer.handleCredentialChange(t),
+    n.da.delete(3 /* OfflineCause.CredentialChange */), await __PRIVATE_enableNetworkInternal(n);
-    t ? (n.
-    await __PRIVATE_disableNetworkInternal(n), n.
+    t ? (n.da.delete(2 /* OfflineCause.IsSecondary */), await __PRIVATE_enableNetworkInternal(n)) : t || (n.da.add(2 /* OfflineCause.IsSecondary */),
+    await __PRIVATE_disableNetworkInternal(n), n.Va.set("Unknown" /* OnlineState.Unknown */));
@@ -15482,11 +15504,11 @@ async function __PRIVATE_remoteStoreHandleCredentialChange(e, t) {
-    return e.
+    return e.fa || (
-    e.
+    e.fa = function __PRIVATE_newPersistentWatchStream(e, t, n) {
-        return r.
+        return r.oa(), new __PRIVATE_PersistentListenStream(t, r.connection, r.authCredentials, r.appCheckCredentials, r.serializer, n);
@@ -15507,11 +15529,11 @@ async function __PRIVATE_remoteStoreHandleCredentialChange(e, t) {
-
-    }), e.
-        t ? (e.
+        Y_: __PRIVATE_onWatchStreamChange.bind(null, e)
+    }), e.Aa.push((async t => {
+        t ? (e.fa.L_(), __PRIVATE_shouldStartWatchStream(e) ? __PRIVATE_startWatchStream(e) : e.Va.set("Unknown" /* OnlineState.Unknown */)) : (await e.fa.stop(),
-    }))), e.
+    }))), e.fa;
@@ -15522,23 +15544,23 @@ async function __PRIVATE_remoteStoreHandleCredentialChange(e, t) {
-    return e.
+    return e.ga || (
-    e.
+    e.ga = function __PRIVATE_newPersistentWriteStream(e, t, n) {
-        return r.
+        return r.oa(), new __PRIVATE_PersistentWriteStream(t, r.connection, r.authCredentials, r.appCheckCredentials, r.serializer, n);
-
-
-    }), e.
-        t ? (e.
+        na: __PRIVATE_onWriteHandshakeComplete.bind(null, e),
+        ra: __PRIVATE_onMutationResult.bind(null, e)
+    }), e.Aa.push((async t => {
+        t ? (e.ga.L_(),
-        await __PRIVATE_fillWritePipeline(e)) : (await e.
-        e.
+        await __PRIVATE_fillWritePipeline(e)) : (await e.ga.stop(), e.Ia.length > 0 && (__PRIVATE_logDebug(nn, `Stopping write stream with ${e.Ia.length} pending writes`),
+        e.Ia = []));
-    }))), e.
+    }))), e.ga;
@@ -15751,25 +15773,25 @@ class DelayedOperation {
  * duplicate events for the same doc.
  */ class __PRIVATE_DocumentChangeSet {
  constructor() {
- this.
+ this.pa = new SortedMap(DocumentKey.comparator);
  }
  track(e) {
- const t = e.doc.key, n = this.
+ const t = e.doc.key, n = this.pa.get(t);
  n ?
  // Merge the new change with the existing change.
- 0 /* ChangeType.Added */ !== e.type && 3 /* ChangeType.Metadata */ === n.type ? this.
+ 0 /* ChangeType.Added */ !== e.type && 3 /* ChangeType.Metadata */ === n.type ? this.pa = this.pa.insert(t, e) : 3 /* ChangeType.Metadata */ === e.type && 1 /* ChangeType.Removed */ !== n.type ? this.pa = this.pa.insert(t, {
  type: n.type,
  doc: e.doc
- }) : 2 /* ChangeType.Modified */ === e.type && 2 /* ChangeType.Modified */ === n.type ? this.
+ }) : 2 /* ChangeType.Modified */ === e.type && 2 /* ChangeType.Modified */ === n.type ? this.pa = this.pa.insert(t, {
  type: 2 /* ChangeType.Modified */ ,
  doc: e.doc
- }) : 2 /* ChangeType.Modified */ === e.type && 0 /* ChangeType.Added */ === n.type ? this.
+ }) : 2 /* ChangeType.Modified */ === e.type && 0 /* ChangeType.Added */ === n.type ? this.pa = this.pa.insert(t, {
  type: 0 /* ChangeType.Added */ ,
  doc: e.doc
- }) : 1 /* ChangeType.Removed */ === e.type && 0 /* ChangeType.Added */ === n.type ? this.
+ }) : 1 /* ChangeType.Removed */ === e.type && 0 /* ChangeType.Added */ === n.type ? this.pa = this.pa.remove(t) : 1 /* ChangeType.Removed */ === e.type && 2 /* ChangeType.Modified */ === n.type ? this.pa = this.pa.insert(t, {
  type: 1 /* ChangeType.Removed */ ,
  doc: n.doc
- }) : 0 /* ChangeType.Added */ === e.type && 1 /* ChangeType.Removed */ === n.type ? this.
+ }) : 0 /* ChangeType.Added */ === e.type && 1 /* ChangeType.Removed */ === n.type ? this.pa = this.pa.insert(t, {
  type: 2 /* ChangeType.Modified */ ,
  doc: e.doc
  }) :
@@ -15782,12 +15804,12 @@ class DelayedOperation {
  // Removed->Metadata
  fail(63341, {
  Vt: e,
-
- }) : this.
+ ya: n
+ }) : this.pa = this.pa.insert(t, e);
  }
-
+ wa() {
  const e = [];
- return this.
+ return this.pa.inorderTraversal(((t, n) => {
  e.push(n);
  })), e;
  }
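The `__PRIVATE_DocumentChangeSet` hunks above mostly rename the minified field backing the sorted change map (now `pa`), but they also show the full set of merge rules for coalescing repeated changes to the same document. A hedged reconstruction of those rules follows, using the `ChangeType` numbering visible in the inline comments (0 = Added, 1 = Removed, 2 = Modified, 3 = Metadata) and a plain `Map` in place of the SDK's `SortedMap`:

```ts
// ChangeType numbering taken from the inline comments in the hunk above.
enum ChangeType { Added = 0, Removed = 1, Modified = 2, Metadata = 3 }

interface DocChange { type: ChangeType; key: string }

class DocumentChangeSetSketch {
  private changes = new Map<string, DocChange>();

  track(change: DocChange): void {
    const old = this.changes.get(change.key);
    if (!old) {
      this.changes.set(change.key, change);
      return;
    }
    // Merge the new change with the existing change for the same document.
    if (change.type !== ChangeType.Added && old.type === ChangeType.Metadata) {
      this.changes.set(change.key, change);
    } else if (change.type === ChangeType.Metadata && old.type !== ChangeType.Removed) {
      this.changes.set(change.key, { type: old.type, key: change.key });
    } else if (change.type === ChangeType.Modified && old.type === ChangeType.Modified) {
      this.changes.set(change.key, { type: ChangeType.Modified, key: change.key });
    } else if (change.type === ChangeType.Modified && old.type === ChangeType.Added) {
      this.changes.set(change.key, { type: ChangeType.Added, key: change.key });
    } else if (change.type === ChangeType.Removed && old.type === ChangeType.Added) {
      // Added then removed before being emitted: drop the entry entirely.
      this.changes.delete(change.key);
    } else if (change.type === ChangeType.Removed && old.type === ChangeType.Modified) {
      this.changes.set(change.key, { type: ChangeType.Removed, key: change.key });
    } else if (change.type === ChangeType.Added && old.type === ChangeType.Removed) {
      this.changes.set(change.key, { type: ChangeType.Modified, key: change.key });
    } else {
      // Combinations such as Added→Added or Removed→Metadata are unexpected here.
      throw new Error(`unexpected combination: ${old.type} -> ${change.type}`);
    }
  }

  getChanges(): DocChange[] {
    return [...this.changes.values()];
  }
}
```

The notable case is Added followed by Removed, which drops the entry entirely so listeners never see a document that appeared and vanished within the same batch of changes.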
@@ -15843,25 +15865,25 @@ class ViewSnapshot {
  * tracked by EventManager.
  */ class __PRIVATE_QueryListenersInfo {
  constructor() {
- this.
+ this.Sa = void 0, this.ba = [];
  }
  // Helper methods that checks if the query has listeners that listening to remote store
-
- return this.
+ Da() {
+ return this.ba.some((e => e.va()));
  }
  }

  class __PRIVATE_EventManagerImpl {
  constructor() {
  this.queries = __PRIVATE_newQueriesObjectMap(), this.onlineState = "Unknown" /* OnlineState.Unknown */ ,
- this.
+ this.Ca = new Set;
  }
  terminate() {
  !function __PRIVATE_errorAllTargets(e, t) {
  const n = __PRIVATE_debugCast(e), r = n.queries;
  // Prevent further access by clearing ObjectMap.
  n.queries = __PRIVATE_newQueriesObjectMap(), r.forEach(((e, n) => {
- for (const e of n.
+ for (const e of n.ba) e.onError(t);
  }));
  }
  // Call all global snapshot listeners that have been set.
@@ -15878,19 +15900,19 @@ async function __PRIVATE_eventManagerListen(e, t) {
  let r = 3 /* ListenerSetupAction.NoActionRequired */;
  const i = t.query;
  let s = n.queries.get(i);
- s ? !s.
+ s ? !s.Da() && t.va() && (
  // Query has been listening to local cache, and tries to add a new listener sourced from watch.
  r = 2 /* ListenerSetupAction.RequireWatchConnectionOnly */) : (s = new __PRIVATE_QueryListenersInfo,
- r = t.
+ r = t.va() ? 0 /* ListenerSetupAction.InitializeLocalListenAndRequireWatchConnection */ : 1 /* ListenerSetupAction.InitializeLocalListenOnly */);
  try {
  switch (r) {
  case 0 /* ListenerSetupAction.InitializeLocalListenAndRequireWatchConnection */ :
- s.
+ s.Sa = await n.onListen(i,
  /** enableRemoteListen= */ !0);
  break;

  case 1 /* ListenerSetupAction.InitializeLocalListenOnly */ :
- s.
+ s.Sa = await n.onListen(i,
  /** enableRemoteListen= */ !1);
  break;

@@ -15901,10 +15923,10 @@ async function __PRIVATE_eventManagerListen(e, t) {
  const n = __PRIVATE_wrapInUserErrorIfRecoverable(e, `Initialization of query '${__PRIVATE_stringifyQuery(t.query)}' failed`);
  return void t.onError(n);
  }
- if (n.queries.set(i, s), s.
+ if (n.queries.set(i, s), s.ba.push(t),
  // Run global snapshot listeners if a consistent snapshot has been emitted.
- t.
- t.
+ t.Fa(n.onlineState), s.Sa) {
+ t.Ma(s.Sa) && __PRIVATE_raiseSnapshotsInSyncEvent(n);
  }
  }

@@ -15913,8 +15935,8 @@ async function __PRIVATE_eventManagerUnlisten(e, t) {
  let i = 3 /* ListenerRemovalAction.NoActionRequired */;
  const s = n.queries.get(r);
  if (s) {
- const e = s.
- e >= 0 && (s.
+ const e = s.ba.indexOf(t);
+ e >= 0 && (s.ba.splice(e, 1), 0 === s.ba.length ? i = t.va() ? 0 /* ListenerRemovalAction.TerminateLocalListenAndRequireWatchDisconnection */ : 1 /* ListenerRemovalAction.TerminateLocalListenOnly */ : !s.Da() && t.va() && (
  // The removed listener is the last one that sourced from watch.
  i = 2 /* ListenerRemovalAction.RequireWatchDisconnectionOnly */));
  }
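The listen/unlisten hunks above decide, per query, whether the event manager must also connect or disconnect the watch stream, depending on whether any registered listener sources from the server rather than from cache only. A simplified sketch of the setup decision, with assumed readable names standing in for the minified members (`ba` as `listeners`, `Da()` as `hasRemoteListeners()`, `va()` as `listensToRemoteStore()`):

```ts
// Illustrative sketch, not the SDK's actual internal API.
enum ListenerSetupAction {
  InitializeLocalListenAndRequireWatchConnection,
  InitializeLocalListenOnly,
  RequireWatchConnectionOnly,
  NoActionRequired
}

interface ListenerLike {
  listensToRemoteStore(): boolean;
}

interface QueryListenersInfoLike {
  listeners: ListenerLike[];
  // True if any already-registered listener sources from watch (the server).
  hasRemoteListeners(): boolean;
}

function setupActionFor(
  existing: QueryListenersInfoLike | undefined,
  added: ListenerLike
): ListenerSetupAction {
  if (!existing) {
    // First listener for this query: always start the local (cache) listen,
    // and also connect to watch unless the listener is cache-only.
    return added.listensToRemoteStore()
      ? ListenerSetupAction.InitializeLocalListenAndRequireWatchConnection
      : ListenerSetupAction.InitializeLocalListenOnly;
  }
  if (!existing.hasRemoteListeners() && added.listensToRemoteStore()) {
    // The query was only listening to the local cache so far; this is the
    // first listener that needs server data, so only the watch connection
    // has to be established.
    return ListenerSetupAction.RequireWatchConnectionOnly;
  }
  return ListenerSetupAction.NoActionRequired;
}
```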
@@ -15941,8 +15963,8 @@ function __PRIVATE_eventManagerOnWatchChange(e, t) {
  for (const e of t) {
  const t = e.query, i = n.queries.get(t);
  if (i) {
- for (const t of i.
- i.
+ for (const t of i.ba) t.Ma(e) && (r = !0);
+ i.Sa = e;
  }
  }
  r && __PRIVATE_raiseSnapshotsInSyncEvent(n);
@@ -15950,14 +15972,14 @@ function __PRIVATE_eventManagerOnWatchChange(e, t) {

  function __PRIVATE_eventManagerOnWatchError(e, t, n) {
  const r = __PRIVATE_debugCast(e), i = r.queries.get(t);
- if (i) for (const e of i.
+ if (i) for (const e of i.ba) e.onError(n);
  // Remove all listeners. NOTE: We don't need to call syncEngine.unlisten()
  // after an error.
  r.queries.delete(t);
  }

  function __PRIVATE_raiseSnapshotsInSyncEvent(e) {
- e.
+ e.Ca.forEach((e => {
  e.next();
  }));
  }
@@ -15965,7 +15987,7 @@ function __PRIVATE_raiseSnapshotsInSyncEvent(e) {
  var rn, sn;

  /** Listen to both cache and server changes */
- (sn = rn || (rn = {})).
+ (sn = rn || (rn = {})).xa = "default",
  /** Listen to changes in cache only */
  sn.Cache = "cache";

@@ -15977,12 +15999,12 @@ sn.Cache = "cache";
  */
  class __PRIVATE_QueryListener {
  constructor(e, t, n) {
- this.query = e, this.
+ this.query = e, this.Oa = t,
  /**
  * Initial snapshots (e.g. from cache) may not be propagated to the wrapped
  * observer. This flag is set to true once we've actually raised an event.
  */
- this.
+ this.Na = !1, this.Ba = null, this.onlineState = "Unknown" /* OnlineState.Unknown */ ,
  this.options = n || {};
  }
  /**
@@ -15990,7 +16012,7 @@ class __PRIVATE_QueryListener {
  * if applicable (depending on what changed, whether the user has opted into
  * metadata-only changes, etc.). Returns true if a user-facing event was
  * indeed raised.
- */
+ */ Ma(e) {
  if (!this.options.includeMetadataChanges) {
  // Remove the metadata only changes.
  const t = [];
@@ -15999,49 +16021,49 @@ class __PRIVATE_QueryListener {
  /* excludesMetadataChanges= */ !0, e.hasCachedResults);
  }
  let t = !1;
- return this.
- t = !0), this.
+ return this.Na ? this.La(e) && (this.Oa.next(e), t = !0) : this.ka(e, this.onlineState) && (this.qa(e),
+ t = !0), this.Ba = e, t;
  }
  onError(e) {
- this.
+ this.Oa.error(e);
  }
- /** Returns whether a snapshot was raised. */
+ /** Returns whether a snapshot was raised. */ Fa(e) {
  this.onlineState = e;
  let t = !1;
- return this.
+ return this.Ba && !this.Na && this.ka(this.Ba, e) && (this.qa(this.Ba), t = !0),
  t;
  }
-
+ ka(e, t) {
  // Always raise the first event when we're synced
  if (!e.fromCache) return !0;
  // Always raise event if listening to cache
- if (!this.
+ if (!this.va()) return !0;
  // NOTE: We consider OnlineState.Unknown as online (it should become Offline
  // or Online if we wait long enough).
  const n = "Offline" /* OnlineState.Offline */ !== t;
  // Don't raise the event if we're online, aren't synced yet (checked
  // above) and are waiting for a sync.
- return (!this.options.
+ return (!this.options.Qa || !n) && (!e.docs.isEmpty() || e.hasCachedResults || "Offline" /* OnlineState.Offline */ === t);
  // Raise data from cache if we have any documents, have cached results before,
  // or we are offline.
  }
-
+ La(e) {
  // We don't need to handle includeDocumentMetadataChanges here because
  // the Metadata only changes have already been stripped out if needed.
  // At this point the only changes we will see are the ones we should
  // propagate.
  if (e.docChanges.length > 0) return !0;
- const t = this.
+ const t = this.Ba && this.Ba.hasPendingWrites !== e.hasPendingWrites;
  return !(!e.syncStateChanged && !t) && !0 === this.options.includeMetadataChanges;
  // Generally we should have hit one of the cases above, but it's possible
  // to get here if there were only metadata docChanges and they got
  // stripped out.
  }
-
+ qa(e) {
  e = ViewSnapshot.fromInitialDocuments(e.query, e.docs, e.mutatedKeys, e.fromCache, e.hasCachedResults),
- this.
+ this.Na = !0, this.Oa.next(e);
  }
-
+ va() {
  return this.options.source !== rn.Cache;
  }
  }
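The `__PRIVATE_QueryListener` hunk above renames the members guarding snapshot delivery (`Ma`, `Fa`, `ka`, `La`, `qa`, `va`). The decision about whether to raise an initial, possibly cache-only snapshot can be read out of the `ka(e, t)` body; a de-minified sketch is below, where `waitForSyncWhenOnline` is an assumed name standing in for the minified option `Qa`:

```ts
// Sketch of the initial-event check; field and option names are assumptions.
type OnlineState = 'Unknown' | 'Online' | 'Offline';

interface SnapshotLike {
  fromCache: boolean;
  isEmpty: boolean;
  hasCachedResults: boolean;
}

function shouldRaiseInitialEvent(
  snap: SnapshotLike,
  onlineState: OnlineState,
  listensToRemote: boolean,
  waitForSyncWhenOnline: boolean
): boolean {
  // A synced (non-cache) snapshot is always worth raising.
  if (!snap.fromCache) return true;
  // Cache-only listeners always accept cache snapshots.
  if (!listensToRemote) return true;
  // Treat "Unknown" as online: it should resolve to Online or Offline shortly.
  const maybeOnline = onlineState !== 'Offline';
  if (waitForSyncWhenOnline && maybeOnline) return false;
  // Raise data from cache if we have any documents, had cached results before,
  // or we are definitely offline.
  return !snap.isEmpty || snap.hasCachedResults || onlineState === 'Offline';
}
```

The ordering of the checks matches the inline comments: `Unknown` is treated as online, so a listener that wants server data does not fire prematurely from cache.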
@@ -16069,10 +16091,10 @@ class __PRIVATE_QueryListener {
|
|
|
16069
16091
|
constructor(e,
|
|
16070
16092
|
// How many bytes this element takes to store in the bundle.
|
|
16071
16093
|
t) {
|
|
16072
|
-
this
|
|
16094
|
+
this.$a = e, this.byteLength = t;
|
|
16073
16095
|
}
|
|
16074
|
-
|
|
16075
|
-
return "metadata" in this
|
|
16096
|
+
Ua() {
|
|
16097
|
+
return "metadata" in this.$a;
|
|
16076
16098
|
}
|
|
16077
16099
|
}
|
|
16078
16100
|
|
|
@@ -16116,7 +16138,7 @@ class __PRIVATE_QueryListener {
|
|
|
16116
16138
|
* storage and provide progress update while loading.
|
|
16117
16139
|
*/ class __PRIVATE_BundleLoader {
|
|
16118
16140
|
constructor(e, t, n) {
|
|
16119
|
-
this.
|
|
16141
|
+
this.Ka = e, this.localStore = t, this.serializer = n,
|
|
16120
16142
|
/** Batched queries to be saved into storage */
|
|
16121
16143
|
this.queries = [],
|
|
16122
16144
|
/** Batched documents to be saved into storage */
|
|
@@ -16129,21 +16151,21 @@ class __PRIVATE_QueryListener {
|
|
|
16129
16151
|
*
|
|
16130
16152
|
* Returns a new progress if adding the element leads to a new progress,
|
|
16131
16153
|
* otherwise returns null.
|
|
16132
|
-
*/
|
|
16154
|
+
*/ Wa(e) {
|
|
16133
16155
|
this.progress.bytesLoaded += e.byteLength;
|
|
16134
16156
|
let t = this.progress.documentsLoaded;
|
|
16135
|
-
if (e.
|
|
16157
|
+
if (e.$a.namedQuery) this.queries.push(e.$a.namedQuery); else if (e.$a.documentMetadata) {
|
|
16136
16158
|
this.documents.push({
|
|
16137
|
-
metadata: e.
|
|
16138
|
-
}), e.
|
|
16139
|
-
const n = ResourcePath.fromString(e.
|
|
16159
|
+
metadata: e.$a.documentMetadata
|
|
16160
|
+
}), e.$a.documentMetadata.exists || ++t;
|
|
16161
|
+
const n = ResourcePath.fromString(e.$a.documentMetadata.name);
|
|
16140
16162
|
this.collectionGroups.add(n.get(n.length - 2));
|
|
16141
|
-
} else e.
|
|
16163
|
+
} else e.$a.document && (this.documents[this.documents.length - 1].document = e.$a.document,
|
|
16142
16164
|
++t);
|
|
16143
16165
|
return t !== this.progress.documentsLoaded ? (this.progress.documentsLoaded = t,
|
|
16144
16166
|
Object.assign({}, this.progress)) : null;
|
|
16145
16167
|
}
|
|
16146
|
-
|
|
16168
|
+
Ga(e) {
|
|
16147
16169
|
const t = new Map, n = new __PRIVATE_BundleConverterImpl(this.serializer);
|
|
16148
16170
|
for (const r of e) if (r.metadata.queries) {
|
|
16149
16171
|
const e = n.Us(r.metadata.name);
|
|
@@ -16157,12 +16179,12 @@ class __PRIVATE_QueryListener {
|
|
|
16157
16179
|
/**
|
|
16158
16180
|
* Update the progress to 'Success' and return the updated progress.
|
|
16159
16181
|
*/ async complete() {
|
|
16160
|
-
const e = await __PRIVATE_localStoreApplyBundledDocuments(this.localStore, new __PRIVATE_BundleConverterImpl(this.serializer), this.documents, this.
|
|
16182
|
+
const e = await __PRIVATE_localStoreApplyBundledDocuments(this.localStore, new __PRIVATE_BundleConverterImpl(this.serializer), this.documents, this.Ka.id), t = this.Ga(this.documents);
|
|
16161
16183
|
for (const e of this.queries) await __PRIVATE_localStoreSaveNamedQuery(this.localStore, e, t.get(e.name));
|
|
16162
16184
|
return this.progress.taskState = "Success", {
|
|
16163
16185
|
progress: this.progress,
|
|
16164
|
-
|
|
16165
|
-
|
|
16186
|
+
za: this.collectionGroups,
|
|
16187
|
+
ja: e
|
|
16166
16188
|
};
|
|
16167
16189
|
}
|
|
16168
16190
|
}
|
|
@@ -16220,7 +16242,7 @@ class __PRIVATE_RemovedLimboDocument {
|
|
|
16220
16242
|
constructor(e,
|
|
16221
16243
|
/** Documents included in the remote target */
|
|
16222
16244
|
t) {
|
|
16223
|
-
this.query = e, this.
|
|
16245
|
+
this.query = e, this.Ha = t, this.Ja = null, this.hasCachedResults = !1,
|
|
16224
16246
|
/**
|
|
16225
16247
|
* A flag whether the view is current with the backend. A view is considered
|
|
16226
16248
|
* current after it has seen the current flag from the backend and did not
|
|
@@ -16229,16 +16251,16 @@ class __PRIVATE_RemovedLimboDocument {
|
|
|
16229
16251
|
*/
|
|
16230
16252
|
this.current = !1,
|
|
16231
16253
|
/** Documents in the view but not in the remote target */
|
|
16232
|
-
this.
|
|
16254
|
+
this.Ya = __PRIVATE_documentKeySet(),
|
|
16233
16255
|
/** Document Keys that have local changes */
|
|
16234
|
-
this.mutatedKeys = __PRIVATE_documentKeySet(), this.
|
|
16235
|
-
this.
|
|
16256
|
+
this.mutatedKeys = __PRIVATE_documentKeySet(), this.Za = __PRIVATE_newQueryComparator(e),
|
|
16257
|
+
this.Xa = new DocumentSet(this.Za);
|
|
16236
16258
|
}
|
|
16237
16259
|
/**
|
|
16238
16260
|
* The set of remote documents that the server has told us belongs to the target associated with
|
|
16239
16261
|
* this view.
|
|
16240
|
-
*/ get
|
|
16241
|
-
return this.
|
|
16262
|
+
*/ get eu() {
|
|
16263
|
+
return this.Ha;
|
|
16242
16264
|
}
|
|
16243
16265
|
/**
|
|
16244
16266
|
* Iterates over a set of doc changes, applies the query limit, and computes
|
|
@@ -16249,8 +16271,8 @@ class __PRIVATE_RemovedLimboDocument {
|
|
|
16249
16271
|
* @param previousChanges - If this is being called with a refill, then start
|
|
16250
16272
|
* with this set of docs and changes instead of the current view.
|
|
16251
16273
|
* @returns a new set of docs, changes, and refill flag.
|
|
16252
|
-
*/
|
|
16253
|
-
const n = t ? t.
|
|
16274
|
+
*/ tu(e, t) {
|
|
16275
|
+
const n = t ? t.nu : new __PRIVATE_DocumentChangeSet, r = t ? t.Xa : this.Xa;
|
|
16254
16276
|
let i = t ? t.mutatedKeys : this.mutatedKeys, s = r, o = !1;
|
|
16255
16277
|
// Track the last doc in a (full) limit. This is necessary, because some
|
|
16256
16278
|
// update (a delete, or an update moving a doc past the old limit) might
|
|
@@ -16273,10 +16295,10 @@ class __PRIVATE_RemovedLimboDocument {
|
|
|
16273
16295
|
u.data.isEqual(c.data) ? l !== h && (n.track({
|
|
16274
16296
|
type: 3 /* ChangeType.Metadata */ ,
|
|
16275
16297
|
doc: c
|
|
16276
|
-
}), P = !0) : this.
|
|
16298
|
+
}), P = !0) : this.ru(u, c) || (n.track({
|
|
16277
16299
|
type: 2 /* ChangeType.Modified */ ,
|
|
16278
16300
|
doc: c
|
|
16279
|
-
}), P = !0, (_ && this.
|
|
16301
|
+
}), P = !0, (_ && this.Za(c, _) > 0 || a && this.Za(c, a) < 0) && (
|
|
16280
16302
|
// This doc moved from inside the limit to outside the limit.
|
|
16281
16303
|
// That means there may be some other doc in the local cache
|
|
16282
16304
|
// that should be included instead.
|
|
@@ -16301,13 +16323,13 @@ class __PRIVATE_RemovedLimboDocument {
|
|
|
16301
16323
|
});
|
|
16302
16324
|
}
|
|
16303
16325
|
return {
|
|
16304
|
-
|
|
16305
|
-
|
|
16326
|
+
Xa: s,
|
|
16327
|
+
nu: n,
|
|
16306
16328
|
Cs: o,
|
|
16307
16329
|
mutatedKeys: i
|
|
16308
16330
|
};
|
|
16309
16331
|
}
|
|
16310
|
-
|
|
16332
|
+
ru(e, t) {
|
|
16311
16333
|
// We suppress the initial change event for documents that were modified as
|
|
16312
16334
|
// part of a write acknowledgment (e.g. when the value of a server transform
|
|
16313
16335
|
// is applied) as Watch will send us the same document again.
|
|
@@ -16332,10 +16354,10 @@ class __PRIVATE_RemovedLimboDocument {
|
|
|
16332
16354
|
*/
|
|
16333
16355
|
// PORTING NOTE: The iOS/Android clients always compute limbo document changes.
|
|
16334
16356
|
applyChanges(e, t, n, r) {
|
|
16335
|
-
const i = this.
|
|
16336
|
-
this.
|
|
16357
|
+
const i = this.Xa;
|
|
16358
|
+
this.Xa = e.Xa, this.mutatedKeys = e.mutatedKeys;
|
|
16337
16359
|
// Sort changes based on type and query comparator
|
|
16338
|
-
const s = e.
|
|
16360
|
+
const s = e.nu.wa();
|
|
16339
16361
|
s.sort(((e, t) => function __PRIVATE_compareChangeType(e, t) {
|
|
16340
16362
|
const order = e => {
|
|
16341
16363
|
switch (e) {
|
|
@@ -16375,70 +16397,70 @@ class __PRIVATE_RemovedLimboDocument {
|
|
|
16375
16397
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
16376
16398
|
* See the License for the specific language governing permissions and
|
|
16377
16399
|
* limitations under the License.
|
|
16378
|
-
*/ (e.type, t.type) || this.
|
|
16379
|
-
const o = t && !r ? this.
|
|
16400
|
+
*/ (e.type, t.type) || this.Za(e.doc, t.doc))), this.iu(n), r = null != r && r;
|
|
16401
|
+
const o = t && !r ? this.su() : [], _ = 0 === this.Ya.size && this.current && !r ? 1 /* SyncState.Synced */ : 0 /* SyncState.Local */ , a = _ !== this.Ja;
|
|
16380
16402
|
// We are at synced state if there is no limbo docs are waiting to be resolved, view is current
|
|
16381
16403
|
// with the backend, and the query is not pending to reset due to existence filter mismatch.
|
|
16382
|
-
if (this.
|
|
16404
|
+
if (this.Ja = _, 0 !== s.length || a) {
|
|
16383
16405
|
return {
|
|
16384
|
-
snapshot: new ViewSnapshot(this.query, e.
|
|
16406
|
+
snapshot: new ViewSnapshot(this.query, e.Xa, i, s, e.mutatedKeys, 0 /* SyncState.Local */ === _, a,
|
|
16385
16407
|
/* excludesMetadataChanges= */ !1, !!n && n.resumeToken.approximateByteSize() > 0),
|
|
16386
|
-
|
|
16408
|
+
ou: o
|
|
16387
16409
|
};
|
|
16388
16410
|
}
|
|
16389
16411
|
// no changes
|
|
16390
16412
|
return {
|
|
16391
|
-
|
|
16413
|
+
ou: o
|
|
16392
16414
|
};
|
|
16393
16415
|
}
|
|
16394
16416
|
/**
|
|
16395
16417
|
* Applies an OnlineState change to the view, potentially generating a
|
|
16396
16418
|
* ViewChange if the view's syncState changes as a result.
|
|
16397
|
-
*/
|
|
16419
|
+
*/ Fa(e) {
|
|
16398
16420
|
return this.current && "Offline" /* OnlineState.Offline */ === e ? (
|
|
16399
16421
|
// If we're offline, set `current` to false and then call applyChanges()
|
|
16400
16422
|
// to refresh our syncState and generate a ViewChange as appropriate. We
|
|
16401
16423
|
// are guaranteed to get a new TargetChange that sets `current` back to
|
|
16402
16424
|
// true once the client is back online.
|
|
16403
16425
|
this.current = !1, this.applyChanges({
|
|
16404
|
-
|
|
16405
|
-
|
|
16426
|
+
Xa: this.Xa,
|
|
16427
|
+
nu: new __PRIVATE_DocumentChangeSet,
|
|
16406
16428
|
mutatedKeys: this.mutatedKeys,
|
|
16407
16429
|
Cs: !1
|
|
16408
16430
|
},
|
|
16409
16431
|
/* limboResolutionEnabled= */ !1)) : {
|
|
16410
|
-
|
|
16432
|
+
ou: []
|
|
16411
16433
|
};
|
|
16412
16434
|
}
|
|
16413
16435
|
/**
|
|
16414
16436
|
* Returns whether the doc for the given key should be in limbo.
|
|
16415
|
-
*/
|
|
16437
|
+
*/ _u(e) {
|
|
16416
16438
|
// If the remote end says it's part of this query, it's not in limbo.
|
|
16417
|
-
return !this.
|
|
16439
|
+
return !this.Ha.has(e) && (
|
|
16418
16440
|
// The local store doesn't think it's a result, so it shouldn't be in limbo.
|
|
16419
|
-
!!this.
|
|
16441
|
+
!!this.Xa.has(e) && !this.Xa.get(e).hasLocalMutations);
|
|
16420
16442
|
}
|
|
16421
16443
|
/**
|
|
16422
16444
|
* Updates syncedDocuments, current, and limbo docs based on the given change.
|
|
16423
16445
|
* Returns the list of changes to which docs are in limbo.
|
|
16424
|
-
*/
|
|
16425
|
-
e && (e.addedDocuments.forEach((e => this.
|
|
16426
|
-
e.removedDocuments.forEach((e => this.
|
|
16446
|
+
*/ iu(e) {
|
|
16447
|
+
e && (e.addedDocuments.forEach((e => this.Ha = this.Ha.add(e))), e.modifiedDocuments.forEach((e => {})),
|
|
16448
|
+
e.removedDocuments.forEach((e => this.Ha = this.Ha.delete(e))), this.current = e.current);
|
|
16427
16449
|
}
|
|
16428
|
-
|
|
16450
|
+
su() {
|
|
16429
16451
|
// We can only determine limbo documents when we're in-sync with the server.
|
|
16430
16452
|
if (!this.current) return [];
|
|
16431
16453
|
// TODO(klimt): Do this incrementally so that it's not quadratic when
|
|
16432
16454
|
// updating many documents.
|
|
16433
|
-
const e = this.
|
|
16434
|
-
this.
|
|
16435
|
-
this.
|
|
16455
|
+
const e = this.Ya;
|
|
16456
|
+
this.Ya = __PRIVATE_documentKeySet(), this.Xa.forEach((e => {
|
|
16457
|
+
this._u(e.key) && (this.Ya = this.Ya.add(e.key));
|
|
16436
16458
|
}));
|
|
16437
16459
|
// Diff the new limbo docs with the old limbo docs.
|
|
16438
16460
|
const t = [];
|
|
16439
16461
|
return e.forEach((e => {
|
|
16440
|
-
this.
|
|
16441
|
-
})), this.
|
|
16462
|
+
this.Ya.has(e) || t.push(new __PRIVATE_RemovedLimboDocument(e));
|
|
16463
|
+
})), this.Ya.forEach((n => {
|
|
16442
16464
|
e.has(n) || t.push(new __PRIVATE_AddedLimboDocument(n));
|
|
16443
16465
|
})), t;
|
|
16444
16466
|
}
|
|
@@ -16462,9 +16484,9 @@ class __PRIVATE_RemovedLimboDocument {
|
|
|
16462
16484
|
* @returns The ViewChange that resulted from this synchronization.
|
|
16463
16485
|
*/
|
|
16464
16486
|
// PORTING NOTE: Multi-tab only.
|
|
16465
|
-
|
|
16466
|
-
this.
|
|
16467
|
-
const t = this.
|
|
16487
|
+
au(e) {
|
|
16488
|
+
this.Ha = e.$s, this.Ya = __PRIVATE_documentKeySet();
|
|
16489
|
+
const t = this.tu(e.documents);
|
|
16468
16490
|
return this.applyChanges(t, /* limboResolutionEnabled= */ !0);
|
|
16469
16491
|
}
|
|
16470
16492
|
/**
|
|
@@ -16473,8 +16495,8 @@ class __PRIVATE_RemovedLimboDocument {
|
|
|
16473
16495
|
* `hasPendingWrites` status of the already established view.
|
|
16474
16496
|
*/
|
|
16475
16497
|
// PORTING NOTE: Multi-tab only.
|
|
16476
|
-
|
|
16477
|
-
return ViewSnapshot.fromInitialDocuments(this.query, this.
|
|
16498
|
+
uu() {
|
|
16499
|
+
return ViewSnapshot.fromInitialDocuments(this.query, this.Xa, this.mutatedKeys, 0 /* SyncState.Local */ === this.Ja, this.hasCachedResults);
|
|
16478
16500
|
}
|
|
16479
16501
|
}
|
|
16480
16502
|
|
|
@@ -16514,7 +16536,7 @@ const on = "SyncEngine";
|
|
|
16514
16536
|
* decide whether it needs to manufacture a delete event for the target once
|
|
16515
16537
|
* the target is CURRENT.
|
|
16516
16538
|
*/
|
|
16517
|
-
this.
|
|
16539
|
+
this.cu = !1;
|
|
16518
16540
|
}
|
|
16519
16541
|
}
|
|
16520
16542
|
|
|
@@ -16535,8 +16557,8 @@ const on = "SyncEngine";
|
|
|
16535
16557
|
// PORTING NOTE: Manages state synchronization in multi-tab environments.
|
|
16536
16558
|
r, i, s) {
|
|
16537
16559
|
this.localStore = e, this.remoteStore = t, this.eventManager = n, this.sharedClientState = r,
|
|
16538
|
-
this.currentUser = i, this.maxConcurrentLimboResolutions = s, this.
|
|
16539
|
-
this.
|
|
16560
|
+
this.currentUser = i, this.maxConcurrentLimboResolutions = s, this.lu = {}, this.hu = new ObjectMap((e => __PRIVATE_canonifyQuery(e)), __PRIVATE_queryEquals),
|
|
16561
|
+
this.Pu = new Map,
|
|
16540
16562
|
/**
|
|
16541
16563
|
* The keys of documents that are in limbo for which we haven't yet started a
|
|
16542
16564
|
* limbo resolution query. The strings in this set are the result of calling
|
|
@@ -16546,28 +16568,28 @@ const on = "SyncEngine";
|
|
|
16546
16568
|
* of arbitrary elements and it also maintains insertion order, providing the
|
|
16547
16569
|
* desired queue-like FIFO semantics.
|
|
16548
16570
|
*/
|
|
16549
|
-
this.
|
|
16571
|
+
this.Tu = new Set,
|
|
16550
16572
|
/**
|
|
16551
16573
|
* Keeps track of the target ID for each document that is in limbo with an
|
|
16552
16574
|
* active target.
|
|
16553
16575
|
*/
|
|
16554
|
-
this.
|
|
16576
|
+
this.Iu = new SortedMap(DocumentKey.comparator),
|
|
16555
16577
|
/**
|
|
16556
16578
|
* Keeps track of the information about an active limbo resolution for each
|
|
16557
16579
|
* active target ID that was started for the purpose of limbo resolution.
|
|
16558
16580
|
*/
|
|
16559
|
-
this.
|
|
16581
|
+
this.Eu = new Map, this.du = new __PRIVATE_ReferenceSet,
|
|
16560
16582
|
/** Stores user completion handlers, indexed by User and BatchId. */
|
|
16561
|
-
this.
|
|
16583
|
+
this.Au = {},
|
|
16562
16584
|
/** Stores user callbacks waiting for all pending writes to be acknowledged. */
|
|
16563
|
-
this.
|
|
16585
|
+
this.Ru = new Map, this.Vu = __PRIVATE_TargetIdGenerator.lr(), this.onlineState = "Unknown" /* OnlineState.Unknown */ ,
|
|
16564
16586
|
// The primary state is set to `true` or `false` immediately after Firestore
|
|
16565
16587
|
// startup. In the interim, a client should only be considered primary if
|
|
16566
16588
|
// `isPrimary` is true.
|
|
16567
|
-
this.
|
|
16589
|
+
this.mu = void 0;
|
|
16568
16590
|
}
|
|
16569
16591
|
get isPrimaryClient() {
|
|
16570
|
-
return !0 === this.
|
|
16592
|
+
return !0 === this.mu;
|
|
16571
16593
|
}
|
|
16572
16594
|
}
|
|
16573
16595
|
|
|
@@ -16579,7 +16601,7 @@ const on = "SyncEngine";
|
|
|
16579
16601
|
async function __PRIVATE_syncEngineListen(e, t, n = !0) {
|
|
16580
16602
|
const r = __PRIVATE_ensureWatchCallbacks(e);
|
|
16581
16603
|
let i;
|
|
16582
|
-
const s = r.
|
|
16604
|
+
const s = r.hu.get(t);
|
|
16583
16605
|
return s ? (
|
|
16584
16606
|
// PORTING NOTE: With Multi-Tab Web, it is possible that a query view
|
|
16585
16607
|
// already exists when EventManager calls us for the first time. This
|
|
@@ -16587,7 +16609,7 @@ async function __PRIVATE_syncEngineListen(e, t, n = !0) {
|
|
|
16587
16609
|
// behalf of another tab and the user of the primary also starts listening
|
|
16588
16610
|
// to the query. EventManager will not have an assigned target ID in this
|
|
16589
16611
|
// case and calls `listen` to obtain this ID.
|
|
16590
|
-
r.sharedClientState.addLocalQueryTarget(s.targetId), i = s.view.
|
|
16612
|
+
r.sharedClientState.addLocalQueryTarget(s.targetId), i = s.view.uu()) : i = await __PRIVATE_allocateTargetAndMaybeListen(r, t, n,
|
|
16591
16613
|
/** shouldInitializeView= */ !0), i;
|
|
16592
16614
|
}
|
|
16593
16615
|
|
|
@@ -16612,30 +16634,30 @@ async function __PRIVATE_allocateTargetAndMaybeListen(e, t, n, r) {
|
|
|
16612
16634
|
// PORTING NOTE: On Web only, we inject the code that registers new Limbo
|
|
16613
16635
|
// targets based on view changes. This allows us to only depend on Limbo
|
|
16614
16636
|
// changes when user code includes queries.
|
|
16615
|
-
e.
|
|
16616
|
-
let i = t.view.
|
|
16637
|
+
e.fu = (t, n, r) => async function __PRIVATE_applyDocChanges(e, t, n, r) {
|
|
16638
|
+
let i = t.view.tu(n);
|
|
16617
16639
|
i.Cs && (
|
|
16618
16640
|
// The query has a limit and some docs were removed, so we need
|
|
16619
16641
|
// to re-run the query against the local store to make sure we
|
|
16620
16642
|
// didn't lose any good docs that had been past the limit.
|
|
16621
16643
|
i = await __PRIVATE_localStoreExecuteQuery(e.localStore, t.query,
|
|
16622
|
-
/* usePreviousResults= */ !1).then((({documents: e}) => t.view.
|
|
16644
|
+
/* usePreviousResults= */ !1).then((({documents: e}) => t.view.tu(e, i))));
|
|
16623
16645
|
const s = r && r.targetChanges.get(t.targetId), o = r && null != r.targetMismatches.get(t.targetId), _ = t.view.applyChanges(i,
|
|
16624
16646
|
/* limboResolutionEnabled= */ e.isPrimaryClient, s, o);
|
|
16625
|
-
return __PRIVATE_updateTrackedLimbos(e, t.targetId, _.
|
|
16647
|
+
return __PRIVATE_updateTrackedLimbos(e, t.targetId, _.ou), _.snapshot;
|
|
16626
16648
|
}(e, t, n, r);
|
|
16627
16649
|
const s = await __PRIVATE_localStoreExecuteQuery(e.localStore, t,
|
|
16628
|
-
/* usePreviousResults= */ !0), o = new __PRIVATE_View(t, s.$s), _ = o.
|
|
16650
|
+
/* usePreviousResults= */ !0), o = new __PRIVATE_View(t, s.$s), _ = o.tu(s.documents), a = TargetChange.createSynthesizedTargetChangeForCurrentChange(n, r && "Offline" /* OnlineState.Offline */ !== e.onlineState, i), u = o.applyChanges(_,
|
|
16629
16651
|
/* limboResolutionEnabled= */ e.isPrimaryClient, a);
|
|
16630
|
-
__PRIVATE_updateTrackedLimbos(e, n, u.
|
|
16652
|
+
__PRIVATE_updateTrackedLimbos(e, n, u.ou);
|
|
16631
16653
|
const c = new __PRIVATE_QueryView(t, n, o);
|
|
16632
|
-
return e.
|
|
16654
|
+
return e.hu.set(t, c), e.Pu.has(n) ? e.Pu.get(n).push(t) : e.Pu.set(n, [ t ]), u.snapshot;
|
|
16633
16655
|
}
|
|
16634
16656
|
|
|
16635
16657
|
/** Stops listening to the query. */ async function __PRIVATE_syncEngineUnlisten(e, t, n) {
|
|
16636
|
-
const r = __PRIVATE_debugCast(e), i = r.
|
|
16637
|
-
if (s.length > 1) return r.
|
|
16638
|
-
void r.
|
|
16658
|
+
const r = __PRIVATE_debugCast(e), i = r.hu.get(t), s = r.Pu.get(i.targetId);
|
|
16659
|
+
if (s.length > 1) return r.Pu.set(i.targetId, s.filter((e => !__PRIVATE_queryEquals(e, t)))),
|
|
16660
|
+
void r.hu.delete(t);
|
|
16639
16661
|
// No other queries are mapped to the target, clean up the query and the target.
|
|
16640
16662
|
if (r.isPrimaryClient) {
|
|
16641
16663
|
// We need to remove the local query target first to allow us to verify
|
|
@@ -16651,7 +16673,7 @@ async function __PRIVATE_allocateTargetAndMaybeListen(e, t, n, r) {
|
|
|
16651
16673
|
}
|
|
16652
16674
|
|
|
16653
16675
|
/** Unlistens to the remote store while still listening to the cache. */ async function __PRIVATE_triggerRemoteStoreUnlisten(e, t) {
|
|
16654
|
-
const n = __PRIVATE_debugCast(e), r = n.
|
|
16676
|
+
const n = __PRIVATE_debugCast(e), r = n.hu.get(t), i = n.Pu.get(r.targetId);
|
|
16655
16677
|
n.isPrimaryClient && 1 === i.length && (
|
|
16656
16678
|
// PORTING NOTE: Unregister the target ID with local Firestore client as
|
|
16657
16679
|
// watch target.
|
|
@@ -16713,9 +16735,9 @@ async function __PRIVATE_allocateTargetAndMaybeListen(e, t, n, r) {
|
|
|
16713
16735
|
})));
|
|
16714
16736
|
}(r.localStore, t);
|
|
16715
16737
|
r.sharedClientState.addPendingMutation(e.batchId), function __PRIVATE_addMutationCallback(e, t, n) {
|
|
16716
|
-
let r = e.
|
|
16738
|
+
let r = e.Au[e.currentUser.toKey()];
|
|
16717
16739
|
r || (r = new SortedMap(__PRIVATE_primitiveComparator));
|
|
16718
|
-
r = r.insert(t, n), e.
|
|
16740
|
+
r = r.insert(t, n), e.Au[e.currentUser.toKey()] = r;
|
|
16719
16741
|
}
|
|
16720
16742
|
/**
|
|
16721
16743
|
* Resolves or rejects the user callback for the given batch and then discards
|
|
@@ -16740,13 +16762,13 @@ async function __PRIVATE_allocateTargetAndMaybeListen(e, t, n, r) {
|
|
|
16740
16762
|
const e = await __PRIVATE_localStoreApplyRemoteEventToLocalCache(n.localStore, t);
|
|
16741
16763
|
// Update `receivedDocument` as appropriate for any limbo targets.
|
|
16742
16764
|
t.targetChanges.forEach(((e, t) => {
|
|
16743
|
-
const r = n.
|
|
16765
|
+
const r = n.Eu.get(t);
|
|
16744
16766
|
r && (
|
|
16745
16767
|
// Since this is a limbo resolution lookup, it's for a single document
|
|
16746
16768
|
// and it could be added, modified, or removed, but not a combination.
|
|
16747
16769
|
__PRIVATE_hardAssert(e.addedDocuments.size + e.modifiedDocuments.size + e.removedDocuments.size <= 1, 22616),
|
|
16748
|
-
e.addedDocuments.size > 0 ? r.
|
|
16749
|
-
r.
|
|
16770
|
+
e.addedDocuments.size > 0 ? r.cu = !0 : e.modifiedDocuments.size > 0 ? __PRIVATE_hardAssert(r.cu, 14607) : e.removedDocuments.size > 0 && (__PRIVATE_hardAssert(r.cu, 42227),
|
|
16771
|
+
r.cu = !1));
|
|
16750
16772
|
})), await __PRIVATE_syncEngineEmitNewSnapsAndNotifyLocalStore(n, e, t);
|
|
16751
16773
|
} catch (e) {
|
|
16752
16774
|
await __PRIVATE_ignoreIfPrimaryLeaseLoss(e);
|
|
@@ -16764,19 +16786,19 @@ async function __PRIVATE_allocateTargetAndMaybeListen(e, t, n, r) {
|
|
|
16764
16786
|
// SharedClientState.
|
|
16765
16787
|
if (r.isPrimaryClient && 0 /* OnlineStateSource.RemoteStore */ === n || !r.isPrimaryClient && 1 /* OnlineStateSource.SharedClientState */ === n) {
|
|
16766
16788
|
const e = [];
|
|
16767
|
-
r.
|
|
16768
|
-
const i = r.view.
|
|
16789
|
+
r.hu.forEach(((n, r) => {
|
|
16790
|
+
const i = r.view.Fa(t);
|
|
16769
16791
|
i.snapshot && e.push(i.snapshot);
|
|
16770
16792
|
})), function __PRIVATE_eventManagerOnOnlineStateChange(e, t) {
|
|
16771
16793
|
const n = __PRIVATE_debugCast(e);
|
|
16772
16794
|
n.onlineState = t;
|
|
16773
16795
|
let r = !1;
|
|
16774
16796
|
n.queries.forEach(((e, n) => {
|
|
16775
|
-
for (const e of n.
|
|
16797
|
+
for (const e of n.ba)
|
|
16776
16798
|
// Run global snapshot listeners if a consistent snapshot has been emitted.
|
|
16777
|
-
e.
|
|
16799
|
+
e.Fa(t) && (r = !0);
|
|
16778
16800
|
})), r && __PRIVATE_raiseSnapshotsInSyncEvent(n);
|
|
16779
|
-
}(r.eventManager, t), e.length && r.
|
|
16801
|
+
}(r.eventManager, t), e.length && r.lu.Y_(e), r.onlineState = t, r.isPrimaryClient && r.sharedClientState.setOnlineState(t);
|
|
16780
16802
|
}
|
|
16781
16803
|
}
|
|
16782
16804
|
|
|
@@ -16794,7 +16816,7 @@ async function __PRIVATE_allocateTargetAndMaybeListen(e, t, n, r) {
|
|
|
16794
16816
|
const r = __PRIVATE_debugCast(e);
|
|
16795
16817
|
// PORTING NOTE: Multi-tab only.
|
|
16796
16818
|
r.sharedClientState.updateQueryState(t, "rejected", n);
|
|
16797
|
-
const i = r.
|
|
16819
|
+
const i = r.Eu.get(t), s = i && i.key;
|
|
16798
16820
|
if (s) {
|
|
16799
16821
|
// TODO(klimt): We really only should do the following on permission
|
|
16800
16822
|
// denied errors, but we don't have the cause code here.
|
|
@@ -16816,7 +16838,7 @@ async function __PRIVATE_allocateTargetAndMaybeListen(e, t, n, r) {
|
|
|
16816
16838
|
// RemoteEvent. If `applyRemoteEvent()` throws, we want to re-listen to
|
|
16817
16839
|
// this query when the RemoteStore restarts the Watch stream, which should
|
|
16818
16840
|
// re-trigger the target failure.
|
|
16819
|
-
r.
|
|
16841
|
+
r.Iu = r.Iu.remove(s), r.Eu.delete(t), __PRIVATE_pumpEnqueuedLimboResolutions(r);
|
|
16820
16842
|
} else await __PRIVATE_localStoreReleaseTarget(r.localStore, t,
|
|
16821
16843
|
/* keepPersistedTargetData */ !1).then((() => __PRIVATE_removeAndCleanupTarget(r, t, n))).catch(__PRIVATE_ignoreIfPrimaryLeaseLoss);
|
|
16822
16844
|
}
|
|
@@ -16878,8 +16900,8 @@ async function __PRIVATE_syncEngineRejectFailedWrite(e, t, n) {
|
|
|
16878
16900
|
if (e === j)
|
|
16879
16901
|
// Trigger the callback right away if there is no pending writes at the moment.
|
|
16880
16902
|
return void t.resolve();
|
|
16881
|
-
const r = n.
|
|
16882
|
-
r.push(t), n.
|
|
16903
|
+
const r = n.Ru.get(e) || [];
|
|
16904
|
+
r.push(t), n.Ru.set(e, r);
|
|
16883
16905
|
} catch (e) {
|
|
16884
16906
|
const n = __PRIVATE_wrapInUserErrorIfRecoverable(e, "Initialization of waitForPendingWrites() operation failed");
|
|
16885
16907
|
t.reject(n);
|
|
@@ -16890,28 +16912,28 @@ async function __PRIVATE_syncEngineRejectFailedWrite(e, t, n) {
|
|
|
16890
16912
|
* Triggers the callbacks that are waiting for this batch id to get acknowledged by server,
|
|
16891
16913
|
* if there are any.
|
|
16892
16914
|
*/ function __PRIVATE_triggerPendingWritesCallbacks(e, t) {
|
|
16893
|
-
(e.
|
|
16915
|
+
(e.Ru.get(t) || []).forEach((e => {
|
|
16894
16916
|
e.resolve();
|
|
16895
|
-
})), e.
|
|
16917
|
+
})), e.Ru.delete(t);
|
|
16896
16918
|
}
|
|
16897
16919
|
|
|
16898
16920
|
/** Reject all outstanding callbacks waiting for pending writes to complete. */ function __PRIVATE_processUserCallback(e, t, n) {
|
|
16899
16921
|
const r = __PRIVATE_debugCast(e);
|
|
16900
|
-
let i = r.
|
|
16922
|
+
let i = r.Au[r.currentUser.toKey()];
|
|
16901
16923
|
// NOTE: Mutations restored from persistence won't have callbacks, so it's
|
|
16902
16924
|
// okay for there to be no callback for this ID.
|
|
16903
16925
|
if (i) {
|
|
16904
16926
|
const e = i.get(t);
|
|
16905
|
-
e && (n ? e.reject(n) : e.resolve(), i = i.remove(t)), r.
|
|
16927
|
+
e && (n ? e.reject(n) : e.resolve(), i = i.remove(t)), r.Au[r.currentUser.toKey()] = i;
|
|
16906
16928
|
}
|
|
16907
16929
|
}
|
|
16908
16930
|
|
|
16909
16931
|
function __PRIVATE_removeAndCleanupTarget(e, t, n = null) {
|
|
16910
16932
|
e.sharedClientState.removeLocalQueryTarget(t);
|
|
16911
|
-
for (const r of e.
|
|
16912
|
-
if (e.
|
|
16913
|
-
e.
|
|
16914
|
-
e.
|
|
16933
|
+
for (const r of e.Pu.get(t)) e.hu.delete(r), n && e.lu.gu(r, n);
|
|
16934
|
+
if (e.Pu.delete(t), e.isPrimaryClient) {
|
|
16935
|
+
e.du.Hr(t).forEach((t => {
|
|
16936
|
+
e.du.containsKey(t) ||
|
|
16915
16937
|
// We removed the last reference for this key
|
|
16916
16938
|
__PRIVATE_removeLimboTarget(e, t);
|
|
16917
16939
|
}));
|
|
@@ -16919,30 +16941,30 @@ function __PRIVATE_removeAndCleanupTarget(e, t, n = null) {
|
|
|
16919
16941
|
}
|
|
16920
16942
|
|
|
16921
16943
|
function __PRIVATE_removeLimboTarget(e, t) {
|
|
16922
|
-
e.
|
|
16944
|
+
e.Tu.delete(t.path.canonicalString());
|
|
16923
16945
|
// It's possible that the target already got removed because the query failed. In that case,
|
|
16924
16946
|
// the key won't exist in `limboTargetsByKey`. Only do the cleanup if we still have the target.
|
|
16925
|
-
const n = e.
|
|
16926
|
-
null !== n && (__PRIVATE_remoteStoreUnlisten(e.remoteStore, n), e.
|
|
16927
|
-
e.
|
|
16947
|
+
const n = e.Iu.get(t);
|
|
16948
|
+
null !== n && (__PRIVATE_remoteStoreUnlisten(e.remoteStore, n), e.Iu = e.Iu.remove(t),
|
|
16949
|
+
e.Eu.delete(n), __PRIVATE_pumpEnqueuedLimboResolutions(e));
|
|
16928
16950
|
}
|
|
16929
16951
|
|
|
16930
16952
|
function __PRIVATE_updateTrackedLimbos(e, t, n) {
|
|
16931
|
-
for (const r of n) if (r instanceof __PRIVATE_AddedLimboDocument) e.
|
|
16953
|
+
for (const r of n) if (r instanceof __PRIVATE_AddedLimboDocument) e.du.addReference(r.key, t),
|
|
16932
16954
|
__PRIVATE_trackLimboChange(e, r); else if (r instanceof __PRIVATE_RemovedLimboDocument) {
|
|
16933
|
-
__PRIVATE_logDebug(on, "Document no longer in limbo: " + r.key), e.
|
|
16934
|
-
e.
|
|
16955
|
+
__PRIVATE_logDebug(on, "Document no longer in limbo: " + r.key), e.du.removeReference(r.key, t);
|
|
16956
|
+
e.du.containsKey(r.key) ||
|
|
16935
16957
|
// We removed the last reference for this key
|
|
16936
16958
|
__PRIVATE_removeLimboTarget(e, r.key);
|
|
16937
16959
|
} else fail(19791, {
|
|
16938
|
-
|
|
16960
|
+
pu: r
|
|
16939
16961
|
});
|
|
16940
16962
|
}
|
|
16941
16963
|
|
|
16942
16964
|
function __PRIVATE_trackLimboChange(e, t) {
|
|
16943
16965
|
const n = t.key, r = n.path.canonicalString();
|
|
16944
|
-
e.
|
|
16945
|
-
e.
|
|
16966
|
+
e.Iu.get(n) || e.Tu.has(r) || (__PRIVATE_logDebug(on, "New document in limbo: " + n),
|
|
16967
|
+
e.Tu.add(r), __PRIVATE_pumpEnqueuedLimboResolutions(e));
|
|
16946
16968
|
}
|
|
16947
16969
|
|
|
16948
16970
|
/**
|
|
@@ -16953,18 +16975,18 @@ function __PRIVATE_trackLimboChange(e, t) {
  * with "resource exhausted" errors which can lead to pathological client
  * behavior as seen in https://github.com/firebase/firebase-js-sdk/issues/2683.
  */ function __PRIVATE_pumpEnqueuedLimboResolutions(e) {
- for (;e.
- const t = e.
- e.
- const n = new DocumentKey(ResourcePath.fromString(t)), r = e.
- e.
+ for (;e.Tu.size > 0 && e.Iu.size < e.maxConcurrentLimboResolutions; ) {
+ const t = e.Tu.values().next().value;
+ e.Tu.delete(t);
+ const n = new DocumentKey(ResourcePath.fromString(t)), r = e.Vu.next();
+ e.Eu.set(r, new LimboResolution(n)), e.Iu = e.Iu.insert(n, r), __PRIVATE_remoteStoreListen(e.remoteStore, new TargetData(__PRIVATE_queryToTarget(__PRIVATE_newQueryForPath(n.path)), r, "TargetPurposeLimboResolution" /* TargetPurpose.LimboResolution */ , __PRIVATE_ListenSequence.le));
  }
  }

  async function __PRIVATE_syncEngineEmitNewSnapsAndNotifyLocalStore(e, t, n) {
  const r = __PRIVATE_debugCast(e), i = [], s = [], o = [];
- r.
- o.push(r.
+ r.hu.isEmpty() || (r.hu.forEach(((e, _) => {
+ o.push(r.fu(_, t, n).then((e => {
  var t;
  // If there are changes, or we are handling a global snapshot, notify
  // secondary clients to update query state.
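`__PRIVATE_pumpEnqueuedLimboResolutions` above drains a FIFO set of limbo document keys while keeping the number of active limbo listens under `maxConcurrentLimboResolutions`, the throttling motivated by the linked issue 2683. A compact sketch of that bounded queue, with stand-in types and a caller-supplied `startListen` callback instead of the SDK's remote-store call:

```ts
// Sketch of the bounded limbo-resolution queue pumped by the hunk above.
class LimboResolutionQueue {
  private enqueued = new Set<string>();            // FIFO of document paths
  private activeByKey = new Map<string, number>(); // path -> target id
  private nextTargetId = 1;

  constructor(
    private maxConcurrent: number,
    private startListen: (path: string, targetId: number) => void
  ) {}

  enqueue(path: string): void {
    if (!this.enqueued.has(path) && !this.activeByKey.has(path)) {
      this.enqueued.add(path);
      this.pump();
    }
  }

  resolve(path: string): void {
    this.activeByKey.delete(path);
    this.pump();
  }

  // Starts as many queued resolutions as the concurrency limit allows. Keeping
  // this bounded avoids flooding the backend with one-shot document listens.
  private pump(): void {
    while (this.enqueued.size > 0 && this.activeByKey.size < this.maxConcurrent) {
      const path = this.enqueued.values().next().value as string;
      this.enqueued.delete(path);
      const targetId = this.nextTargetId++;
      this.activeByKey.set(path, targetId);
      this.startListen(path, targetId);
    }
  }
}
```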
@@ -16982,7 +17004,7 @@ async function __PRIVATE_syncEngineEmitNewSnapsAndNotifyLocalStore(e, t, n) {
|
|
|
16982
17004
|
s.push(t);
|
|
16983
17005
|
}
|
|
16984
17006
|
})));
|
|
16985
|
-
})), await Promise.all(o), r.
|
|
17007
|
+
})), await Promise.all(o), r.lu.Y_(i), await async function __PRIVATE_localStoreNotifyLocalViewChanges(e, t) {
|
|
16986
17008
|
const n = __PRIVATE_debugCast(e);
|
|
16987
17009
|
try {
|
|
16988
17010
|
await n.persistence.runTransaction("notifyLocalViewChanges", "readwrite", (e => PersistencePromise.forEach(t, (t => PersistencePromise.forEach(t.ds, (r => n.persistence.referenceDelegate.addReference(e, t.targetId, r))).next((() => PersistencePromise.forEach(t.As, (r => n.persistence.referenceDelegate.removeReference(e, t.targetId, r)))))))));
|
|
@@ -17013,11 +17035,11 @@ async function __PRIVATE_syncEngineHandleCredentialChange(e, t) {
|
|
|
17013
17035
|
n.currentUser = t,
|
|
17014
17036
|
// Fails tasks waiting for pending writes requested by previous user.
|
|
17015
17037
|
function __PRIVATE_rejectOutstandingPendingWritesCallbacks(e, t) {
|
|
17016
|
-
e.
|
|
17038
|
+
e.Ru.forEach((e => {
|
|
17017
17039
|
e.forEach((e => {
|
|
17018
17040
|
e.reject(new FirestoreError(L.CANCELLED, t));
|
|
17019
17041
|
}));
|
|
17020
|
-
})), e.
|
|
17042
|
+
})), e.Ru.clear();
|
|
17021
17043
|
}(n, "'waitForPendingWrites' promise is rejected due to a user change."),
|
|
17022
17044
|
// TODO(b/114226417): Consider calling this only in the primary tab.
|
|
17023
17045
|
n.sharedClientState.handleUserChange(t, e.removedBatchIds, e.addedBatchIds), await __PRIVATE_syncEngineEmitNewSnapsAndNotifyLocalStore(n, e.ks);
|
|
@@ -17025,15 +17047,15 @@ async function __PRIVATE_syncEngineHandleCredentialChange(e, t) {
|
|
|
17025
17047
|
}
|
|
17026
17048
|
|
|
17027
17049
|
function __PRIVATE_syncEngineGetRemoteKeysForTarget(e, t) {
|
|
17028
|
-
const n = __PRIVATE_debugCast(e), r = n.
|
|
17029
|
-
if (r && r.
|
|
17050
|
+
const n = __PRIVATE_debugCast(e), r = n.Eu.get(t);
|
|
17051
|
+
if (r && r.cu) return __PRIVATE_documentKeySet().add(r.key);
|
|
17030
17052
|
{
|
|
17031
17053
|
let e = __PRIVATE_documentKeySet();
|
|
17032
|
-
const r = n.
|
|
17054
|
+
const r = n.Pu.get(t);
|
|
17033
17055
|
if (!r) return e;
|
|
17034
17056
|
for (const t of r) {
|
|
17035
|
-
const r = n.
|
|
17036
|
-
e = e.unionWith(r.view.
|
|
17057
|
+
const r = n.hu.get(t);
|
|
17058
|
+
e = e.unionWith(r.view.eu);
|
|
17037
17059
|
}
|
|
17038
17060
|
return e;
|
|
17039
17061
|
}
|
|
@@ -17044,8 +17066,8 @@ function __PRIVATE_syncEngineGetRemoteKeysForTarget(e, t) {
|
|
|
17044
17066
|
* from persistence.
|
|
17045
17067
|
*/ async function __PRIVATE_synchronizeViewAndComputeSnapshot(e, t) {
|
|
17046
17068
|
const n = __PRIVATE_debugCast(e), r = await __PRIVATE_localStoreExecuteQuery(n.localStore, t.query,
|
|
17047
|
-
/* usePreviousResults= */ !0), i = t.view.
|
|
17048
|
-
return n.isPrimaryClient && __PRIVATE_updateTrackedLimbos(n, t.targetId, i.
|
|
17069
|
+
/* usePreviousResults= */ !0), i = t.view.au(r);
|
|
17070
|
+
return n.isPrimaryClient && __PRIVATE_updateTrackedLimbos(n, t.targetId, i.ou),
|
|
17049
17071
|
i;
|
|
17050
17072
|
}
|
|
17051
17073
|
|
|
@@ -17081,7 +17103,7 @@ async function __PRIVATE_syncEngineApplyBatchState(e, t, n, r) {
|
|
|
17081
17103
|
}
|
|
17082
17104
|
// PORTING NOTE: Multi-Tab only.
|
|
17083
17105
|
(i.localStore, t)) : fail(6720, "Unknown batchState", {
|
|
17084
|
-
|
|
17106
|
+
yu: n
|
|
17085
17107
|
}), await __PRIVATE_syncEngineEmitNewSnapsAndNotifyLocalStore(i, s)) :
|
|
17086
17108
|
// A throttled tab may not have seen the mutation before it was completed
|
|
17087
17109
|
// and removed from the mutation queue, in which case we won't have cached
|
|
@@ -17098,7 +17120,7 @@ async function __PRIVATE_syncEngineApplyBatchState(e, t, n, r) {
|
|
|
17098
17120
|
async function __PRIVATE_syncEngineApplyPrimaryState(e, t) {
|
|
17099
17121
|
const n = __PRIVATE_debugCast(e);
|
|
17100
17122
|
if (__PRIVATE_ensureWatchCallbacks(n), __PRIVATE_syncEngineEnsureWriteCallbacks(n),
|
|
17101
|
-
!0 === t && !0 !== n.
|
|
17123
|
+
!0 === t && !0 !== n.mu) {
|
|
17102
17124
|
// Secondary tabs only maintain Views for their local listeners and the
|
|
17103
17125
|
// Views internal state may not be 100% populated (in particular
|
|
17104
17126
|
// secondary tabs don't track syncedDocuments, the set of documents the
|
|
@@ -17106,12 +17128,12 @@ async function __PRIVATE_syncEngineApplyPrimaryState(e, t) {
|
|
|
17106
17128
|
// primary, we need to need to make sure that all views for all targets
|
|
17107
17129
|
// match the state on disk.
|
|
17108
17130
|
const e = n.sharedClientState.getAllActiveQueryTargets(), t = await __PRIVATE_synchronizeQueryViewsAndRaiseSnapshots(n, e.toArray());
|
|
17109
|
-
n.
|
|
17131
|
+
n.mu = !0, await __PRIVATE_remoteStoreApplyPrimaryState(n.remoteStore, !0);
|
|
17110
17132
|
for (const e of t) __PRIVATE_remoteStoreListen(n.remoteStore, e);
|
|
17111
|
-
} else if (!1 === t && !1 !== n.
|
|
17133
|
+
} else if (!1 === t && !1 !== n.mu) {
|
|
17112
17134
|
const e = [];
|
|
17113
17135
|
let t = Promise.resolve();
|
|
17114
|
-
n.
|
|
17136
|
+
n.Pu.forEach(((r, i) => {
|
|
17115
17137
|
n.sharedClientState.isLocalQueryTarget(i) ? e.push(i) : t = t.then((() => (__PRIVATE_removeAndCleanupTarget(n, i),
|
|
17116
17138
|
__PRIVATE_localStoreReleaseTarget(n.localStore, i,
|
|
17117
17139
|
/*keepPersistedTargetData=*/ !0)))), __PRIVATE_remoteStoreUnlisten(n.remoteStore, i);
|
|
@@ -17119,9 +17141,9 @@ async function __PRIVATE_syncEngineApplyPrimaryState(e, t) {
|
|
|
17119
17141
|
// PORTING NOTE: Multi-Tab only.
|
|
17120
17142
|
function __PRIVATE_resetLimboDocuments(e) {
|
|
17121
17143
|
const t = __PRIVATE_debugCast(e);
|
|
17122
|
-
t.
|
|
17144
|
+
t.Eu.forEach(((e, n) => {
|
|
17123
17145
|
__PRIVATE_remoteStoreUnlisten(t.remoteStore, n);
|
|
17124
|
-
})), t.
|
|
17146
|
+
})), t.du.Jr(), t.Eu = new Map, t.Iu = new SortedMap(DocumentKey.comparator);
|
|
17125
17147
|
}
|
|
17126
17148
|
/**
|
|
17127
17149
|
* Reconcile the query views of the provided query targets with the state from
|
|
@@ -17134,7 +17156,7 @@ async function __PRIVATE_syncEngineApplyPrimaryState(e, t) {
|
|
|
17134
17156
|
* tab to a primary tab
|
|
17135
17157
|
*/
|
|
17136
17158
|
// PORTING NOTE: Multi-Tab only.
|
|
17137
|
-
(n), n.
|
|
17159
|
+
(n), n.mu = !1, await __PRIVATE_remoteStoreApplyPrimaryState(n.remoteStore, !1);
|
|
17138
17160
|
}
|
|
17139
17161
|
}
|
|
17140
17162
|
|
|
@@ -17142,7 +17164,7 @@ async function __PRIVATE_synchronizeQueryViewsAndRaiseSnapshots(e, t, n) {
|
|
|
17142
17164
|
const r = __PRIVATE_debugCast(e), i = [], s = [];
|
|
17143
17165
|
for (const e of t) {
|
|
17144
17166
|
let t;
|
|
17145
|
-
const n = r.
|
|
17167
|
+
const n = r.Pu.get(e);
|
|
17146
17168
|
if (n && 0 !== n.length) {
|
|
17147
17169
|
// For queries that have a local View, we fetch their current state
|
|
17148
17170
|
// from LocalStore (as the resume token and the snapshot version
|
|
@@ -17150,7 +17172,7 @@ async function __PRIVATE_synchronizeQueryViewsAndRaiseSnapshots(e, t, n) {
|
|
|
17150
17172
|
// state (the list of syncedDocuments may have gotten out of sync).
|
|
17151
17173
|
t = await __PRIVATE_localStoreAllocateTarget(r.localStore, __PRIVATE_queryToTarget(n[0]));
|
|
17152
17174
|
for (const e of n) {
|
|
17153
|
-
const t = r.
|
|
17175
|
+
const t = r.hu.get(e), n = await __PRIVATE_synchronizeViewAndComputeSnapshot(r, t);
|
|
17154
17176
|
n.snapshot && s.push(n.snapshot);
|
|
17155
17177
|
}
|
|
17156
17178
|
} else {
|
|
@@ -17162,7 +17184,7 @@ async function __PRIVATE_synchronizeQueryViewsAndRaiseSnapshots(e, t, n) {
|
|
|
17162
17184
|
}
|
|
17163
17185
|
i.push(t);
|
|
17164
17186
|
}
|
|
17165
|
-
return r.
|
|
17187
|
+
return r.lu.Y_(s), i;
|
|
17166
17188
|
}
|
|
17167
17189
|
|
|
17168
17190
|
/**
|
|
@@ -17192,11 +17214,11 @@ function __PRIVATE_syncEngineGetActiveClients(e) {
|
|
|
17192
17214
|
// PORTING NOTE: Multi-Tab only.
|
|
17193
17215
|
async function __PRIVATE_syncEngineApplyTargetState(e, t, n, r) {
|
|
17194
17216
|
const i = __PRIVATE_debugCast(e);
|
|
17195
|
-
if (i.
|
|
17217
|
+
if (i.mu)
|
|
17196
17218
|
// If we receive a target state notification via WebStorage, we are
|
|
17197
17219
|
// either already secondary or another tab has taken the primary lease.
|
|
17198
17220
|
return void __PRIVATE_logDebug(on, "Ignoring unexpected query state notification.");
|
|
17199
|
-
const s = i.
|
|
17221
|
+
const s = i.Pu.get(t);
|
|
17200
17222
|
if (s && s.length > 0) switch (n) {
|
|
17201
17223
|
case "current":
|
|
17202
17224
|
case "not-current":
|
|
@@ -17218,9 +17240,9 @@ async function __PRIVATE_syncEngineApplyTargetState(e, t, n, r) {
|
|
|
17218
17240
|
|
|
17219
17241
|
/** Adds or removes Watch targets for queries from different tabs. */ async function __PRIVATE_syncEngineApplyActiveTargetsChange(e, t, n) {
|
|
17220
17242
|
const r = __PRIVATE_ensureWatchCallbacks(e);
|
|
17221
|
-
if (r.
|
|
17243
|
+
if (r.mu) {
|
|
17222
17244
|
for (const e of t) {
|
|
17223
|
-
if (r.
|
|
17245
|
+
if (r.Pu.has(e) && r.sharedClientState.isActiveQueryTarget(e)) {
|
|
17224
17246
|
__PRIVATE_logDebug(on, "Adding an already active target " + e);
|
|
17225
17247
|
continue;
|
|
17226
17248
|
}
|
|
@@ -17231,7 +17253,7 @@ async function __PRIVATE_syncEngineApplyTargetState(e, t, n, r) {
|
|
|
17231
17253
|
for (const e of n)
|
|
17232
17254
|
// Check that the target is still active since the target might have been
|
|
17233
17255
|
// removed if it has been rejected by the backend.
|
|
17234
|
-
r.
|
|
17256
|
+
r.Pu.has(e) &&
|
|
17235
17257
|
// Release queries that are still active.
|
|
17236
17258
|
await __PRIVATE_localStoreReleaseTarget(r.localStore, e,
|
|
17237
17259
|
/* keepPersistedTargetData */ !1).then((() => {
|
|
@@ -17245,7 +17267,7 @@ function __PRIVATE_ensureWatchCallbacks(e) {
|
|
|
17245
17267
|
return t.remoteStore.remoteSyncer.applyRemoteEvent = __PRIVATE_syncEngineApplyRemoteEvent.bind(null, t),
|
|
17246
17268
|
t.remoteStore.remoteSyncer.getRemoteKeysForTarget = __PRIVATE_syncEngineGetRemoteKeysForTarget.bind(null, t),
|
|
17247
17269
|
t.remoteStore.remoteSyncer.rejectListen = __PRIVATE_syncEngineRejectListen.bind(null, t),
|
|
17248
|
-
t.
|
|
17270
|
+
t.lu.Y_ = __PRIVATE_eventManagerOnWatchChange.bind(null, t.eventManager), t.lu.gu = __PRIVATE_eventManagerOnWatchError.bind(null, t.eventManager),
|
|
17249
17271
|
t;
|
|
17250
17272
|
}
|
|
17251
17273
|
|
|
@@ -17288,13 +17310,13 @@ function __PRIVATE_syncEngineEnsureWriteCallbacks(e) {
|
|
|
17288
17310
|
}(r)), Promise.resolve(new Set);
|
|
17289
17311
|
n._updateProgress(__PRIVATE_bundleInitialProgress(r));
|
|
17290
17312
|
const i = new __PRIVATE_BundleLoader(r, e.localStore, t.serializer);
|
|
17291
|
-
let s = await t.
|
|
17313
|
+
let s = await t.wu();
|
|
17292
17314
|
for (;s; ) {
|
|
17293
|
-
const e = await i
|
|
17294
|
-
e && n._updateProgress(e), s = await t.
|
|
17315
|
+
const e = await i.Wa(s);
|
|
17316
|
+
e && n._updateProgress(e), s = await t.wu();
|
|
17295
17317
|
}
|
|
17296
17318
|
const o = await i.complete();
|
|
17297
|
-
return await __PRIVATE_syncEngineEmitNewSnapsAndNotifyLocalStore(e, o.
|
|
17319
|
+
return await __PRIVATE_syncEngineEmitNewSnapsAndNotifyLocalStore(e, o.ja,
|
|
17298
17320
|
/* remoteEvent */ void 0),
|
|
17299
17321
|
// Save metadata, so loading the same bundle will skip.
|
|
17300
17322
|
await function __PRIVATE_localStoreSaveBundle(e, t) {
|
|
@@ -17304,7 +17326,7 @@ function __PRIVATE_syncEngineEnsureWriteCallbacks(e) {
|
|
|
17304
17326
|
/**
|
|
17305
17327
|
* Returns a promise of a `NamedQuery` associated with given query name. Promise
|
|
17306
17328
|
* resolves to undefined if no persisted data can be found.
|
|
17307
|
-
*/ (e.localStore, r), n._completeWith(o.progress), Promise.resolve(o.
|
|
17329
|
+
*/ (e.localStore, r), n._completeWith(o.progress), Promise.resolve(o.za);
|
|
17308
17330
|
} catch (e) {
|
|
17309
17331
|
return __PRIVATE_logWarn(on, `Loading bundle failed with ${e}`), n._failWith(e),
|
|
17310
17332
|
Promise.resolve(new Set);
|
|
@@ -17339,23 +17361,23 @@ class __PRIVATE_MemoryOfflineComponentProvider {
  this.kind = "memory", this.synchronizeTabs = !1;
  }
  async initialize(e) {
- this.serializer = __PRIVATE_newSerializer(e.databaseInfo.databaseId), this.sharedClientState = this.
- this.persistence = this.
- this.gcScheduler = this.
+ this.serializer = __PRIVATE_newSerializer(e.databaseInfo.databaseId), this.sharedClientState = this.Su(e),
+ this.persistence = this.bu(e), await this.persistence.start(), this.localStore = this.Du(e),
+ this.gcScheduler = this.vu(e, this.localStore), this.indexBackfillerScheduler = this.Cu(e, this.localStore);
  }
-
+ vu(e, t) {
  return null;
  }
-
+ Cu(e, t) {
  return null;
  }
-
+ Du(e) {
  return __PRIVATE_newLocalStore(this.persistence, new __PRIVATE_QueryEngine, e.initialUser, this.serializer);
  }
-
+ bu(e) {
  return new __PRIVATE_MemoryPersistence(__PRIVATE_MemoryEagerDelegate.fi, this.serializer);
  }
-
+ Su(e) {
  return new __PRIVATE_MemorySharedClientState;
  }
  async terminate() {
@@ -17373,12 +17395,12 @@ class __PRIVATE_LruGcMemoryOfflineComponentProvider extends __PRIVATE_MemoryOffl
|
|
|
17373
17395
|
constructor(e) {
|
|
17374
17396
|
super(), this.cacheSizeBytes = e;
|
|
17375
17397
|
}
|
|
17376
|
-
|
|
17398
|
+
vu(e, t) {
|
|
17377
17399
|
__PRIVATE_hardAssert(this.persistence.referenceDelegate instanceof __PRIVATE_MemoryLruDelegate, 46915);
|
|
17378
17400
|
const n = this.persistence.referenceDelegate.garbageCollector;
|
|
17379
17401
|
return new __PRIVATE_LruScheduler(n, e.asyncQueue, t);
|
|
17380
17402
|
}
|
|
17381
|
-
|
|
17403
|
+
bu(e) {
|
|
17382
17404
|
const t = void 0 !== this.cacheSizeBytes ? LruParams.withCacheSize(this.cacheSizeBytes) : LruParams.DEFAULT;
|
|
17383
17405
|
return new __PRIVATE_MemoryPersistence((e => __PRIVATE_MemoryLruDelegate.fi(e, t)), this.serializer);
|
|
17384
17406
|
}
|
|
@@ -17388,35 +17410,35 @@ class __PRIVATE_LruGcMemoryOfflineComponentProvider extends __PRIVATE_MemoryOffl
|
|
|
17388
17410
|
* Provides all components needed for Firestore with IndexedDB persistence.
|
|
17389
17411
|
*/ class __PRIVATE_IndexedDbOfflineComponentProvider extends __PRIVATE_MemoryOfflineComponentProvider {
|
|
17390
17412
|
constructor(e, t, n) {
|
|
17391
|
-
super(), this.
|
|
17413
|
+
super(), this.Fu = e, this.cacheSizeBytes = t, this.forceOwnership = n, this.kind = "persistent",
|
|
17392
17414
|
this.synchronizeTabs = !1;
|
|
17393
17415
|
}
|
|
17394
17416
|
async initialize(e) {
|
|
17395
|
-
await super.initialize(e), await this.
|
|
17417
|
+
await super.initialize(e), await this.Fu.initialize(this, e),
|
|
17396
17418
|
// Enqueue writes from a previous session
|
|
17397
|
-
await __PRIVATE_syncEngineEnsureWriteCallbacks(this.
|
|
17419
|
+
await __PRIVATE_syncEngineEnsureWriteCallbacks(this.Fu.syncEngine), await __PRIVATE_fillWritePipeline(this.Fu.remoteStore),
|
|
17398
17420
|
// NOTE: This will immediately call the listener, so we make sure to
|
|
17399
17421
|
// set it after localStore / remoteStore are started.
|
|
17400
17422
|
await this.persistence.Ji((() => (this.gcScheduler && !this.gcScheduler.started && this.gcScheduler.start(),
|
|
17401
17423
|
this.indexBackfillerScheduler && !this.indexBackfillerScheduler.started && this.indexBackfillerScheduler.start(),
|
|
17402
17424
|
Promise.resolve())));
|
|
17403
17425
|
}
|
|
17404
|
-
|
|
17426
|
+
Du(e) {
|
|
17405
17427
|
return __PRIVATE_newLocalStore(this.persistence, new __PRIVATE_QueryEngine, e.initialUser, this.serializer);
|
|
17406
17428
|
}
|
|
17407
|
-
|
|
17429
|
+
vu(e, t) {
|
|
17408
17430
|
const n = this.persistence.referenceDelegate.garbageCollector;
|
|
17409
17431
|
return new __PRIVATE_LruScheduler(n, e.asyncQueue, t);
|
|
17410
17432
|
}
|
|
17411
|
-
|
|
17433
|
+
Cu(e, t) {
|
|
17412
17434
|
const n = new __PRIVATE_IndexBackfiller(t, this.persistence);
|
|
17413
17435
|
return new __PRIVATE_IndexBackfillerScheduler(e.asyncQueue, n);
|
|
17414
17436
|
}
|
|
17415
|
-
|
|
17437
|
+
bu(e) {
|
|
17416
17438
|
const t = __PRIVATE_indexedDbStoragePrefix(e.databaseInfo.databaseId, e.databaseInfo.persistenceKey), n = void 0 !== this.cacheSizeBytes ? LruParams.withCacheSize(this.cacheSizeBytes) : LruParams.DEFAULT;
|
|
17417
17439
|
return new __PRIVATE_IndexedDbPersistence(this.synchronizeTabs, t, e.clientId, n, e.asyncQueue, __PRIVATE_getWindow(), getDocument(), this.serializer, this.sharedClientState, !!this.forceOwnership);
|
|
17418
17440
|
}
|
|
17419
|
-
|
|
17441
|
+
Su(e) {
|
|
17420
17442
|
return new __PRIVATE_MemorySharedClientState;
|
|
17421
17443
|
}
|
|
17422
17444
|
}
|
|
@@ -17430,11 +17452,11 @@ class __PRIVATE_LruGcMemoryOfflineComponentProvider extends __PRIVATE_MemoryOffl
|
|
|
17430
17452
|
* `synchronizeTabs` will be enabled.
|
|
17431
17453
|
*/ class __PRIVATE_MultiTabOfflineComponentProvider extends __PRIVATE_IndexedDbOfflineComponentProvider {
|
|
17432
17454
|
constructor(e, t) {
|
|
17433
|
-
super(e, t, /* forceOwnership= */ !1), this.
|
|
17455
|
+
super(e, t, /* forceOwnership= */ !1), this.Fu = e, this.cacheSizeBytes = t, this.synchronizeTabs = !0;
|
|
17434
17456
|
}
|
|
17435
17457
|
async initialize(e) {
|
|
17436
17458
|
await super.initialize(e);
|
|
17437
|
-
const t = this.
|
|
17459
|
+
const t = this.Fu.syncEngine;
|
|
17438
17460
|
this.sharedClientState instanceof __PRIVATE_WebStorageSharedClientState && (this.sharedClientState.syncEngine = {
|
|
17439
17461
|
Co: __PRIVATE_syncEngineApplyBatchState.bind(null, t),
|
|
17440
17462
|
Fo: __PRIVATE_syncEngineApplyTargetState.bind(null, t),
|
|
@@ -17445,11 +17467,11 @@ class __PRIVATE_LruGcMemoryOfflineComponentProvider extends __PRIVATE_MemoryOffl
|
|
|
17445
17467
|
// NOTE: This will immediately call the listener, so we make sure to
|
|
17446
17468
|
// set it after localStore / remoteStore are started.
|
|
17447
17469
|
await this.persistence.Ji((async e => {
|
|
17448
|
-
await __PRIVATE_syncEngineApplyPrimaryState(this.
|
|
17470
|
+
await __PRIVATE_syncEngineApplyPrimaryState(this.Fu.syncEngine, e), this.gcScheduler && (e && !this.gcScheduler.started ? this.gcScheduler.start() : e || this.gcScheduler.stop()),
|
|
17449
17471
|
this.indexBackfillerScheduler && (e && !this.indexBackfillerScheduler.started ? this.indexBackfillerScheduler.start() : e || this.indexBackfillerScheduler.stop());
|
|
17450
17472
|
}));
|
|
17451
17473
|
}
|
|
17452
|
-
|
|
17474
|
+
Su(e) {
|
|
17453
17475
|
const t = __PRIVATE_getWindow();
|
|
17454
17476
|
if (!__PRIVATE_WebStorageSharedClientState.C(t)) throw new FirestoreError(L.UNIMPLEMENTED, "IndexedDB persistence is only available on platforms that support LocalStorage.");
|
|
17455
17477
|
const n = __PRIVATE_indexedDbStoragePrefix(e.databaseInfo.databaseId, e.databaseInfo.persistenceKey);
|
|
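The renamed members above belong to the offline component providers (memory, LRU memory, IndexedDB, multi-tab). As a rough sketch of the public cache configuration those providers back, assuming the current modular API shape (`persistentLocalCache`, `persistentMultipleTabManager`, `memoryLocalCache`); the config object and cache size are placeholders:

```js
import { initializeApp } from "firebase/app";
import {
  initializeFirestore,
  persistentLocalCache,
  persistentMultipleTabManager,
  memoryLocalCache,
} from "firebase/firestore";

const app = initializeApp({ /* your Firebase config (placeholder) */ });

// IndexedDB-backed cache shared across browser tabs
// (the multi-tab variant of the providers in this hunk).
const db = initializeFirestore(app, {
  localCache: persistentLocalCache({
    tabManager: persistentMultipleTabManager(),
    cacheSizeBytes: 50 * 1024 * 1024, // optional LRU budget (assumed setting name)
  }),
});

// Alternatively, a memory-only cache (nothing survives a reload):
// const db = initializeFirestore(app, { localCache: memoryLocalCache() });
```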
@@ -17496,18 +17518,18 @@ class __PRIVATE_LruGcMemoryOfflineComponentProvider extends __PRIVATE_MemoryOffl
|
|
|
17496
17518
|
// PORTING NOTE: Manages state synchronization in multi-tab environments.
|
|
17497
17519
|
r, i, s, o) {
|
|
17498
17520
|
const _ = new __PRIVATE_SyncEngineImpl(e, t, n, r, i, s);
|
|
17499
|
-
return o && (_.
|
|
17521
|
+
return o && (_.mu = !0), _;
|
|
17500
17522
|
}(this.localStore, this.remoteStore, this.eventManager, this.sharedClientState, e.initialUser, e.maxConcurrentLimboResolutions, t);
|
|
17501
17523
|
}
|
|
17502
17524
|
async terminate() {
|
|
17503
17525
|
var e, t;
|
|
17504
17526
|
await async function __PRIVATE_remoteStoreShutdown(e) {
|
|
17505
17527
|
const t = __PRIVATE_debugCast(e);
|
|
17506
|
-
__PRIVATE_logDebug(nn, "RemoteStore shutting down."), t.
|
|
17507
|
-
await __PRIVATE_disableNetworkInternal(t), t.
|
|
17528
|
+
__PRIVATE_logDebug(nn, "RemoteStore shutting down."), t.da.add(5 /* OfflineCause.Shutdown */),
|
|
17529
|
+
await __PRIVATE_disableNetworkInternal(t), t.Ra.shutdown(),
|
|
17508
17530
|
// Set the OnlineState to Unknown (rather than Offline) to avoid potentially
|
|
17509
17531
|
// triggering spurious listener events with cached data, etc.
|
|
17510
|
-
t.
|
|
17532
|
+
t.Va.set("Unknown" /* OnlineState.Unknown */);
|
|
17511
17533
|
}(this.remoteStore), null === (e = this.datastore) || void 0 === e || e.terminate(),
|
|
17512
17534
|
null === (t = this.eventManager) || void 0 === t || t.terminate();
|
|
17513
17535
|
}
|
|
@@ -17599,15 +17621,15 @@ class __PRIVATE_AsyncObserver {
|
|
|
17599
17621
|
this.muted = !1;
|
|
17600
17622
|
}
|
|
17601
17623
|
next(e) {
|
|
17602
|
-
this.muted || this.observer.next && this.
|
|
17624
|
+
this.muted || this.observer.next && this.Mu(this.observer.next, e);
|
|
17603
17625
|
}
|
|
17604
17626
|
error(e) {
|
|
17605
|
-
this.muted || (this.observer.error ? this.
|
|
17627
|
+
this.muted || (this.observer.error ? this.Mu(this.observer.error, e) : __PRIVATE_logError("Uncaught Error in snapshot listener:", e.toString()));
|
|
17606
17628
|
}
|
|
17607
|
-
|
|
17629
|
+
xu() {
|
|
17608
17630
|
this.muted = !0;
|
|
17609
17631
|
}
|
|
17610
|
-
|
|
17632
|
+
Mu(e, t) {
|
|
17611
17633
|
setTimeout((() => {
|
|
17612
17634
|
this.muted || e(t);
|
|
17613
17635
|
}), 0);
|
|
@@ -17639,30 +17661,30 @@ class __PRIVATE_AsyncObserver {
|
|
|
17639
17661
|
constructor(
|
|
17640
17662
|
/** The reader to read from underlying binary bundle data source. */
|
|
17641
17663
|
e, t) {
|
|
17642
|
-
this.
|
|
17664
|
+
this.Ou = e, this.serializer = t,
|
|
17643
17665
|
/** Cached bundle metadata. */
|
|
17644
17666
|
this.metadata = new __PRIVATE_Deferred,
|
|
17645
17667
|
/**
|
|
17646
17668
|
* Internal buffer to hold bundle content, accumulating incomplete element
|
|
17647
17669
|
* content.
|
|
17648
17670
|
*/
|
|
17649
|
-
this.buffer = new Uint8Array, this.
|
|
17671
|
+
this.buffer = new Uint8Array, this.Nu = function __PRIVATE_newTextDecoder() {
|
|
17650
17672
|
return new TextDecoder("utf-8");
|
|
17651
17673
|
}(),
|
|
17652
17674
|
// Read the metadata (which is the first element).
|
|
17653
|
-
this.
|
|
17654
|
-
e && e.
|
|
17675
|
+
this.Bu().then((e => {
|
|
17676
|
+
e && e.Ua() ? this.metadata.resolve(e.$a.metadata) : this.metadata.reject(new Error(`The first element of the bundle is not a metadata, it is\n ${JSON.stringify(null == e ? void 0 : e.$a)}`));
|
|
17655
17677
|
}), (e => this.metadata.reject(e)));
|
|
17656
17678
|
}
|
|
17657
17679
|
close() {
|
|
17658
|
-
return this.
|
|
17680
|
+
return this.Ou.cancel();
|
|
17659
17681
|
}
|
|
17660
17682
|
async getMetadata() {
|
|
17661
17683
|
return this.metadata.promise;
|
|
17662
17684
|
}
|
|
17663
|
-
async
|
|
17685
|
+
async wu() {
|
|
17664
17686
|
// Makes sure metadata is read before proceeding.
|
|
17665
|
-
return await this.getMetadata(), this.
|
|
17687
|
+
return await this.getMetadata(), this.Bu();
|
|
17666
17688
|
}
|
|
17667
17689
|
/**
|
|
17668
17690
|
* Reads from the head of internal buffer, and pulling more data from
|
|
@@ -17673,15 +17695,15 @@ class __PRIVATE_AsyncObserver {
|
|
|
17673
17695
|
*
|
|
17674
17696
|
* Returns either the bundled element, or null if we have reached the end of
|
|
17675
17697
|
* the stream.
|
|
17676
|
-
*/ async
|
|
17677
|
-
const e = await this.
|
|
17698
|
+
*/ async Bu() {
|
|
17699
|
+
const e = await this.Lu();
|
|
17678
17700
|
if (null === e) return null;
|
|
17679
|
-
const t = this.
|
|
17680
|
-
isNaN(n) && this.
|
|
17681
|
-
const r = await this.
|
|
17701
|
+
const t = this.Nu.decode(e), n = Number(t);
|
|
17702
|
+
isNaN(n) && this.ku(`length string (${t}) is not valid number`);
|
|
17703
|
+
const r = await this.qu(n);
|
|
17682
17704
|
return new __PRIVATE_SizedBundleElement(JSON.parse(r), e.length + n);
|
|
17683
17705
|
}
|
|
17684
|
-
/** First index of '{' from the underlying buffer. */
|
|
17706
|
+
/** First index of '{' from the underlying buffer. */ Qu() {
|
|
17685
17707
|
return this.buffer.findIndex((e => e === "{".charCodeAt(0)));
|
|
17686
17708
|
}
|
|
17687
17709
|
/**
|
|
@@ -17689,17 +17711,17 @@ class __PRIVATE_AsyncObserver {
|
|
|
17689
17711
|
* return the content.
|
|
17690
17712
|
*
|
|
17691
17713
|
* If reached end of the stream, returns a null.
|
|
17692
|
-
*/ async
|
|
17693
|
-
for (;this.
|
|
17694
|
-
if (await this
|
|
17714
|
+
*/ async Lu() {
|
|
17715
|
+
for (;this.Qu() < 0; ) {
|
|
17716
|
+
if (await this.$u()) break;
|
|
17695
17717
|
}
|
|
17696
17718
|
// Broke out of the loop because underlying stream is closed, and there
|
|
17697
17719
|
// happens to be no more data to process.
|
|
17698
17720
|
if (0 === this.buffer.length) return null;
|
|
17699
|
-
const e = this.
|
|
17721
|
+
const e = this.Qu();
|
|
17700
17722
|
// Broke out of the loop because underlying stream is closed, but still
|
|
17701
17723
|
// cannot find an open bracket.
|
|
17702
|
-
e < 0 && this.
|
|
17724
|
+
e < 0 && this.ku("Reached the end of bundle when a length string is expected.");
|
|
17703
17725
|
const t = this.buffer.slice(0, e);
|
|
17704
17726
|
// Update the internal buffer to drop the read length.
|
|
17705
17727
|
return this.buffer = this.buffer.slice(e), t;
|
|
@@ -17709,23 +17731,23 @@ class __PRIVATE_AsyncObserver {
|
|
|
17709
17731
|
* number of bytes, pulling more data from the underlying stream if needed.
|
|
17710
17732
|
*
|
|
17711
17733
|
* Returns a string decoded from the read bytes.
|
|
17712
|
-
*/ async
|
|
17734
|
+
*/ async qu(e) {
|
|
17713
17735
|
for (;this.buffer.length < e; ) {
|
|
17714
|
-
await this
|
|
17736
|
+
await this.$u() && this.ku("Reached the end of bundle when more is expected.");
|
|
17715
17737
|
}
|
|
17716
|
-
const t = this.
|
|
17738
|
+
const t = this.Nu.decode(this.buffer.slice(0, e));
|
|
17717
17739
|
// Update the internal buffer to drop the read json string.
|
|
17718
17740
|
return this.buffer = this.buffer.slice(e), t;
|
|
17719
17741
|
}
|
|
17720
|
-
|
|
17742
|
+
ku(e) {
|
|
17721
17743
|
// eslint-disable-next-line @typescript-eslint/no-floating-promises
|
|
17722
|
-
throw this.
|
|
17744
|
+
throw this.Ou.cancel(), new Error(`Invalid bundle format: ${e}`);
|
|
17723
17745
|
}
|
|
17724
17746
|
/**
|
|
17725
17747
|
* Pulls more data from underlying stream to internal buffer.
|
|
17726
17748
|
* Returns a boolean indicating whether the stream is finished.
|
|
17727
|
-
*/ async
|
|
17728
|
-
const e = await this.
|
|
17749
|
+
*/ async $u() {
|
|
17750
|
+
const e = await this.Ou.read();
|
|
17729
17751
|
if (!e.done) {
|
|
17730
17752
|
const t = new Uint8Array(this.buffer.length + e.value.length);
|
|
17731
17753
|
t.set(this.buffer), t.set(e.value, this.buffer.length), this.buffer = t;
|
|
@@ -17830,7 +17852,7 @@ class Transaction$2 {
|
|
|
17830
17852
|
let t;
|
|
17831
17853
|
if (e.isFoundDocument()) t = e.version; else {
|
|
17832
17854
|
if (!e.isNoDocument()) throw fail(50498, {
|
|
17833
|
-
|
|
17855
|
+
Uu: e.constructor.name
|
|
17834
17856
|
});
|
|
17835
17857
|
// Represent a deleted doc using SnapshotVersion.min().
|
|
17836
17858
|
t = SnapshotVersion.min();
|
|
@@ -17902,26 +17924,26 @@ class Transaction$2 {
|
|
|
17902
17924
|
*/ class __PRIVATE_TransactionRunner {
|
|
17903
17925
|
constructor(e, t, n, r, i) {
|
|
17904
17926
|
this.asyncQueue = e, this.datastore = t, this.options = n, this.updateFunction = r,
|
|
17905
|
-
this.deferred = i, this.
|
|
17927
|
+
this.deferred = i, this.Ku = n.maxAttempts, this.x_ = new __PRIVATE_ExponentialBackoff(this.asyncQueue, "transaction_retry" /* TimerId.TransactionRetry */);
|
|
17906
17928
|
}
|
|
17907
|
-
/** Runs the transaction and sets the result on deferred. */
|
|
17908
|
-
this.
|
|
17929
|
+
/** Runs the transaction and sets the result on deferred. */ Wu() {
|
|
17930
|
+
this.Ku -= 1, this.Gu();
|
|
17909
17931
|
}
|
|
17910
|
-
|
|
17911
|
-
this.
|
|
17912
|
-
const e = new Transaction$2(this.datastore), t = this.
|
|
17932
|
+
Gu() {
|
|
17933
|
+
this.x_.y_((async () => {
|
|
17934
|
+
const e = new Transaction$2(this.datastore), t = this.zu(e);
|
|
17913
17935
|
t && t.then((t => {
|
|
17914
17936
|
this.asyncQueue.enqueueAndForget((() => e.commit().then((() => {
|
|
17915
17937
|
this.deferred.resolve(t);
|
|
17916
17938
|
})).catch((e => {
|
|
17917
|
-
this.
|
|
17939
|
+
this.ju(e);
|
|
17918
17940
|
}))));
|
|
17919
17941
|
})).catch((e => {
|
|
17920
|
-
this.
|
|
17942
|
+
this.ju(e);
|
|
17921
17943
|
}));
|
|
17922
17944
|
}));
|
|
17923
17945
|
}
|
|
17924
|
-
|
|
17946
|
+
zu(e) {
|
|
17925
17947
|
try {
|
|
17926
17948
|
const t = this.updateFunction(e);
|
|
17927
17949
|
return !__PRIVATE_isNullOrUndefined(t) && t.catch && t.then ? t : (this.deferred.reject(Error("Transaction callback must return a Promise")),
|
|
@@ -17931,11 +17953,11 @@ class Transaction$2 {
|
|
|
17931
17953
|
return this.deferred.reject(e), null;
|
|
17932
17954
|
}
|
|
17933
17955
|
}
|
|
17934
|
-
|
|
17935
|
-
this.
|
|
17956
|
+
ju(e) {
|
|
17957
|
+
this.Ku > 0 && this.Hu(e) ? (this.Ku -= 1, this.asyncQueue.enqueueAndForget((() => (this.Gu(),
|
|
17936
17958
|
Promise.resolve())))) : this.deferred.reject(e);
|
|
17937
17959
|
}
|
|
17938
|
-
|
|
17960
|
+
Hu(e) {
|
|
17939
17961
|
if ("FirebaseError" === e.name) {
|
|
17940
17962
|
// In transactions, the backend will fail outdated reads with FAILED_PRECONDITION and
|
|
17941
17963
|
// non-matching document versions with ABORTED. These errors should be retried.
|
|
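The `__PRIVATE_TransactionRunner` hunks above carry the retry/backoff bookkeeping (`maxAttempts`, retry on ABORTED / FAILED_PRECONDITION). A hedged sketch of the public `runTransaction` entry point that drives this runner, assuming the `TransactionOptions.maxAttempts` option of the modular SDK; the `players` collection and point fields are hypothetical:

```js
import { initializeApp } from "firebase/app";
import { getFirestore, doc, runTransaction } from "firebase/firestore";

const app = initializeApp({ /* your Firebase config (placeholder) */ });
const db = getFirestore(app);

async function transferPoints(fromId, toId, amount) {
  const fromRef = doc(db, "players", fromId); // hypothetical collection
  const toRef = doc(db, "players", toId);

  return runTransaction(
    db,
    async (tx) => {
      const fromSnap = await tx.get(fromRef);
      const toSnap = await tx.get(toRef);
      if (!fromSnap.exists() || !toSnap.exists()) throw new Error("missing player");
      const balance = fromSnap.data().points - amount;
      if (balance < 0) throw new Error("insufficient points");
      tx.update(fromRef, { points: balance });
      tx.update(toRef, { points: toSnap.data().points + amount });
      return balance;
    },
    { maxAttempts: 3 } // retry budget; contention errors are retried up to this count
  );
}
```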
@@ -18113,7 +18135,7 @@ async function __PRIVATE_getEventManager(e) {
|
|
|
18113
18135
|
const t = await __PRIVATE_getPersistence(e), n = await __PRIVATE_getRemoteStore(e);
|
|
18114
18136
|
return t.setNetworkEnabled(!0), function __PRIVATE_remoteStoreEnableNetwork(e) {
|
|
18115
18137
|
const t = __PRIVATE_debugCast(e);
|
|
18116
|
-
return t.
|
|
18138
|
+
return t.da.delete(0 /* OfflineCause.UserDisabled */), __PRIVATE_enableNetworkInternal(t);
|
|
18117
18139
|
}(n);
|
|
18118
18140
|
}));
|
|
18119
18141
|
}
|
|
@@ -18123,9 +18145,9 @@ async function __PRIVATE_getEventManager(e) {
|
|
|
18123
18145
|
const t = await __PRIVATE_getPersistence(e), n = await __PRIVATE_getRemoteStore(e);
|
|
18124
18146
|
return t.setNetworkEnabled(!1), async function __PRIVATE_remoteStoreDisableNetwork(e) {
|
|
18125
18147
|
const t = __PRIVATE_debugCast(e);
|
|
18126
|
-
t.
|
|
18148
|
+
t.da.add(0 /* OfflineCause.UserDisabled */), await __PRIVATE_disableNetworkInternal(t),
|
|
18127
18149
|
// Set the OnlineState to Offline so get()s return from cache, etc.
|
|
18128
|
-
t.
|
|
18150
|
+
t.Va.set("Offline" /* OnlineState.Offline */);
|
|
18129
18151
|
}(n);
|
|
18130
18152
|
}));
|
|
18131
18153
|
}
|
|
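The two hunks above are the internal enable/disable-network paths (toggling the `OfflineCause.UserDisabled` flag and forcing the online state). Their public counterparts are `enableNetwork` and `disableNetwork`; a minimal sketch, with the Firebase config as a placeholder:

```js
import { initializeApp } from "firebase/app";
import { getFirestore, enableNetwork, disableNetwork } from "firebase/firestore";

const app = initializeApp({ /* your Firebase config (placeholder) */ });
const db = getFirestore(app);

// While the network is disabled, snapshot listeners and reads are served
// from the local cache and writes are queued locally.
await disableNetwork(db);

// ...later, resume syncing with the backend.
await enableNetwork(db);
```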
@@ -18161,7 +18183,7 @@ function __PRIVATE_firestoreClientGetDocumentViaSnapshotListener(e, t, n = {}) {
|
|
|
18161
18183
|
next: _ => {
|
|
18162
18184
|
// Mute and remove query first before passing event to user to avoid
|
|
18163
18185
|
// user actions affecting the now stale query.
|
|
18164
|
-
s.
|
|
18186
|
+
s.xu(), t.enqueueAndForget((() => __PRIVATE_eventManagerUnlisten(e, o)));
|
|
18165
18187
|
const a = _.docs.has(n);
|
|
18166
18188
|
!a && _.fromCache ?
|
|
18167
18189
|
// TODO(dimond): If we're online and the document doesn't
|
|
@@ -18176,7 +18198,7 @@ function __PRIVATE_firestoreClientGetDocumentViaSnapshotListener(e, t, n = {}) {
|
|
|
18176
18198
|
error: e => i.reject(e)
|
|
18177
18199
|
}), o = new __PRIVATE_QueryListener(__PRIVATE_newQueryForPath(n.path), s, {
|
|
18178
18200
|
includeMetadataChanges: !0,
|
|
18179
|
-
|
|
18201
|
+
Qa: !0
|
|
18180
18202
|
});
|
|
18181
18203
|
return __PRIVATE_eventManagerListen(e, o);
|
|
18182
18204
|
}(await __PRIVATE_getEventManager(e), e.asyncQueue, t, n, r))), r.promise;
|
|
@@ -18187,7 +18209,7 @@ function __PRIVATE_firestoreClientGetDocumentsFromLocalCache(e, t) {
|
|
|
18187
18209
|
return e.asyncQueue.enqueueAndForget((async () => async function __PRIVATE_executeQueryFromCache(e, t, n) {
|
|
18188
18210
|
try {
|
|
18189
18211
|
const r = await __PRIVATE_localStoreExecuteQuery(e, t,
|
|
18190
|
-
/* usePreviousResults= */ !0), i = new __PRIVATE_View(t, r.$s), s = i.
|
|
18212
|
+
/* usePreviousResults= */ !0), i = new __PRIVATE_View(t, r.$s), s = i.tu(r.documents), o = i.applyChanges(s,
|
|
18191
18213
|
/* limboResolutionEnabled= */ !1);
|
|
18192
18214
|
n.resolve(o.snapshot);
|
|
18193
18215
|
} catch (e) {
|
|
@@ -18208,12 +18230,12 @@ function __PRIVATE_firestoreClientGetDocumentsViaSnapshotListener(e, t, n = {})
|
|
|
18208
18230
|
next: n => {
|
|
18209
18231
|
// Mute and remove query first before passing event to user to avoid
|
|
18210
18232
|
// user actions affecting the now stale query.
|
|
18211
|
-
s.
|
|
18233
|
+
s.xu(), t.enqueueAndForget((() => __PRIVATE_eventManagerUnlisten(e, o))), n.fromCache && "server" === r.source ? i.reject(new FirestoreError(L.UNAVAILABLE, 'Failed to get documents from server. (However, these documents may exist in the local cache. Run again without setting source to "server" to retrieve the cached documents.)')) : i.resolve(n);
|
|
18212
18234
|
},
|
|
18213
18235
|
error: e => i.reject(e)
|
|
18214
18236
|
}), o = new __PRIVATE_QueryListener(n, s, {
|
|
18215
18237
|
includeMetadataChanges: !0,
|
|
18216
|
-
|
|
18238
|
+
Qa: !0
|
|
18217
18239
|
});
|
|
18218
18240
|
return __PRIVATE_eventManagerListen(e, o);
|
|
18219
18241
|
}(await __PRIVATE_getEventManager(e), e.asyncQueue, t, n, r))), r.promise;
|
|
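The hunks above implement the cache-only and server-only read paths (note the "Failed to get documents from server" error when the source is `"server"` and the backend is unreachable). For orientation, a sketch of the matching public getters; the `cities` query is hypothetical:

```js
import { initializeApp } from "firebase/app";
import {
  getFirestore, collection, query, where,
  getDocs, getDocsFromCache, getDocsFromServer,
} from "firebase/firestore";

const app = initializeApp({ /* your Firebase config (placeholder) */ });
const db = getFirestore(app);

const q = query(collection(db, "cities"), where("population", ">", 100000)); // hypothetical data

const fromAnywhere = await getDocs(q);         // default source: server with cache fallback
const cachedOnly = await getDocsFromCache(q);  // never touches the network
const serverOnly = await getDocsFromServer(q); // rejects if the backend cannot be reached
console.log(fromAnywhere.size, cachedOnly.size, serverOnly.size);
```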
@@ -18251,13 +18273,13 @@ function __PRIVATE_firestoreClientRunAggregateQuery(e, t, n) {
|
|
|
18251
18273
|
function __PRIVATE_firestoreClientAddSnapshotsInSyncListener(e, t) {
|
|
18252
18274
|
const n = new __PRIVATE_AsyncObserver(t);
|
|
18253
18275
|
return e.asyncQueue.enqueueAndForget((async () => function __PRIVATE_addSnapshotsInSyncListener(e, t) {
|
|
18254
|
-
__PRIVATE_debugCast(e).
|
|
18276
|
+
__PRIVATE_debugCast(e).Ca.add(t),
|
|
18255
18277
|
// Immediately fire an initial event, indicating all existing listeners
|
|
18256
18278
|
// are in-sync.
|
|
18257
18279
|
t.next();
|
|
18258
18280
|
}(await __PRIVATE_getEventManager(e), n))), () => {
|
|
18259
|
-
n.
|
|
18260
|
-
__PRIVATE_debugCast(e).
|
|
18281
|
+
n.xu(), e.asyncQueue.enqueueAndForget((async () => function __PRIVATE_removeSnapshotsInSyncListener(e, t) {
|
|
18282
|
+
__PRIVATE_debugCast(e).Ca.delete(t);
|
|
18261
18283
|
}(await __PRIVATE_getEventManager(e), n)));
|
|
18262
18284
|
};
|
|
18263
18285
|
}
|
|
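The `__PRIVATE_firestoreClientAddSnapshotsInSyncListener` hunk above registers and removes the snapshots-in-sync observers. Its public surface is `onSnapshotsInSync`; a minimal sketch with a placeholder config:

```js
import { initializeApp } from "firebase/app";
import { getFirestore, onSnapshotsInSync } from "firebase/firestore";

const app = initializeApp({ /* your Firebase config (placeholder) */ });
const db = getFirestore(app);

// Fires once immediately, then every time all active listeners are in sync
// with each other.
const unsubscribe = onSnapshotsInSync(db, () => {
  console.log("all snapshot listeners are consistent");
});

// Stop observing when no longer needed.
unsubscribe();
```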
@@ -18945,37 +18967,37 @@ class __PRIVATE_AsyncQueueImpl {
|
|
|
18945
18967
|
constructor(e = Promise.resolve()) {
|
|
18946
18968
|
// A list of retryable operations. Retryable operations are run in order and
|
|
18947
18969
|
// retried with backoff.
|
|
18948
|
-
this.
|
|
18970
|
+
this.Ju = [],
|
|
18949
18971
|
// Is this AsyncQueue being shut down? Once it is set to true, it will not
|
|
18950
18972
|
// be changed again.
|
|
18951
|
-
this.
|
|
18973
|
+
this.Yu = !1,
|
|
18952
18974
|
// Operations scheduled to be queued in the future. Operations are
|
|
18953
18975
|
// automatically removed after they are run or canceled.
|
|
18954
|
-
this.
|
|
18976
|
+
this.Zu = [],
|
|
18955
18977
|
// visible for testing
|
|
18956
|
-
this.
|
|
18978
|
+
this.Xu = null,
|
|
18957
18979
|
// Flag set while there's an outstanding AsyncQueue operation, used for
|
|
18958
18980
|
// assertion sanity-checks.
|
|
18959
|
-
this.
|
|
18981
|
+
this.ec = !1,
|
|
18960
18982
|
// Enabled during shutdown on Safari to prevent future access to IndexedDB.
|
|
18961
|
-
this.
|
|
18983
|
+
this.tc = !1,
|
|
18962
18984
|
// List of TimerIds to fast-forward delays for.
|
|
18963
|
-
this.
|
|
18985
|
+
this.nc = [],
|
|
18964
18986
|
// Backoff timer used to schedule retries for retryable operations
|
|
18965
|
-
this.
|
|
18987
|
+
this.x_ = new __PRIVATE_ExponentialBackoff(this, "async_queue_retry" /* TimerId.AsyncQueueRetry */),
|
|
18966
18988
|
// Visibility handler that triggers an immediate retry of all retryable
|
|
18967
18989
|
// operations. Meant to speed up recovery when we regain file system access
|
|
18968
18990
|
// after page comes into foreground.
|
|
18969
|
-
this.
|
|
18991
|
+
this.rc = () => {
|
|
18970
18992
|
const e = getDocument();
|
|
18971
18993
|
e && __PRIVATE_logDebug(ln, "Visibility state changed to " + e.visibilityState),
|
|
18972
|
-
this.
|
|
18973
|
-
}, this.
|
|
18994
|
+
this.x_.S_();
|
|
18995
|
+
}, this.sc = e;
|
|
18974
18996
|
const t = getDocument();
|
|
18975
|
-
t && "function" == typeof t.addEventListener && t.addEventListener("visibilitychange", this.
|
|
18997
|
+
t && "function" == typeof t.addEventListener && t.addEventListener("visibilitychange", this.rc);
|
|
18976
18998
|
}
|
|
18977
18999
|
get isShuttingDown() {
|
|
18978
|
-
return this.
|
|
19000
|
+
return this.Yu;
|
|
18979
19001
|
}
|
|
18980
19002
|
/**
|
|
18981
19003
|
* Adds a new operation to the queue without waiting for it to complete (i.e.
|
|
@@ -18985,44 +19007,44 @@ class __PRIVATE_AsyncQueueImpl {
|
|
|
18985
19007
|
this.enqueue(e);
|
|
18986
19008
|
}
|
|
18987
19009
|
enqueueAndForgetEvenWhileRestricted(e) {
|
|
18988
|
-
this.
|
|
19010
|
+
this.oc(),
|
|
18989
19011
|
// eslint-disable-next-line @typescript-eslint/no-floating-promises
|
|
18990
|
-
this.
|
|
19012
|
+
this._c(e);
|
|
18991
19013
|
}
|
|
18992
19014
|
enterRestrictedMode(e) {
|
|
18993
|
-
if (!this.
|
|
18994
|
-
this.
|
|
19015
|
+
if (!this.Yu) {
|
|
19016
|
+
this.Yu = !0, this.tc = e || !1;
|
|
18995
19017
|
const t = getDocument();
|
|
18996
|
-
t && "function" == typeof t.removeEventListener && t.removeEventListener("visibilitychange", this.
|
|
19018
|
+
t && "function" == typeof t.removeEventListener && t.removeEventListener("visibilitychange", this.rc);
|
|
18997
19019
|
}
|
|
18998
19020
|
}
|
|
18999
19021
|
enqueue(e) {
|
|
19000
|
-
if (this.
|
|
19022
|
+
if (this.oc(), this.Yu)
|
|
19001
19023
|
// Return a Promise which never resolves.
|
|
19002
19024
|
return new Promise((() => {}));
|
|
19003
19025
|
// Create a deferred Promise that we can return to the callee. This
|
|
19004
19026
|
// allows us to return a "hanging Promise" only to the callee and still
|
|
19005
19027
|
// advance the queue even when the operation is not run.
|
|
19006
19028
|
const t = new __PRIVATE_Deferred;
|
|
19007
|
-
return this.
|
|
19029
|
+
return this._c((() => this.Yu && this.tc ? Promise.resolve() : (e().then(t.resolve, t.reject),
|
|
19008
19030
|
t.promise))).then((() => t.promise));
|
|
19009
19031
|
}
|
|
19010
19032
|
enqueueRetryable(e) {
|
|
19011
|
-
this.enqueueAndForget((() => (this.
|
|
19033
|
+
this.enqueueAndForget((() => (this.Ju.push(e), this.ac())));
|
|
19012
19034
|
}
|
|
19013
19035
|
/**
|
|
19014
19036
|
* Runs the next operation from the retryable queue. If the operation fails,
|
|
19015
19037
|
* reschedules with backoff.
|
|
19016
|
-
*/ async
|
|
19017
|
-
if (0 !== this.
|
|
19038
|
+
*/ async ac() {
|
|
19039
|
+
if (0 !== this.Ju.length) {
|
|
19018
19040
|
try {
|
|
19019
|
-
await this.
|
|
19041
|
+
await this.Ju[0](), this.Ju.shift(), this.x_.reset();
|
|
19020
19042
|
} catch (e) {
|
|
19021
19043
|
if (!__PRIVATE_isIndexedDbTransactionError(e)) throw e;
|
|
19022
19044
|
// Failure will be handled by AsyncQueue
|
|
19023
19045
|
__PRIVATE_logDebug(ln, "Operation failed with retryable error: " + e);
|
|
19024
19046
|
}
|
|
19025
|
-
this.
|
|
19047
|
+
this.Ju.length > 0 &&
|
|
19026
19048
|
// If there are additional operations, we re-schedule `retryNextOp()`.
|
|
19027
19049
|
// This is necessary to run retryable operations that failed during
|
|
19028
19050
|
// their initial attempt since we don't know whether they are already
|
|
@@ -19033,51 +19055,51 @@ class __PRIVATE_AsyncQueueImpl {
|
|
|
19033
19055
|
// Since `backoffAndRun()` cancels an existing backoff and schedules a
|
|
19034
19056
|
// new backoff on every call, there is only ever a single additional
|
|
19035
19057
|
// operation in the queue.
|
|
19036
|
-
this.
|
|
19058
|
+
this.x_.y_((() => this.ac()));
|
|
19037
19059
|
}
|
|
19038
19060
|
}
|
|
19039
|
-
|
|
19040
|
-
const t = this.
|
|
19041
|
-
this.
|
|
19061
|
+
_c(e) {
|
|
19062
|
+
const t = this.sc.then((() => (this.ec = !0, e().catch((e => {
|
|
19063
|
+
this.Xu = e, this.ec = !1;
|
|
19042
19064
|
// Re-throw the error so that this.tail becomes a rejected Promise and
|
|
19043
19065
|
// all further attempts to chain (via .then) will just short-circuit
|
|
19044
19066
|
// and return the rejected Promise.
|
|
19045
19067
|
throw __PRIVATE_logError("INTERNAL UNHANDLED ERROR: ", __PRIVATE_getMessageOrStack(e)),
|
|
19046
19068
|
e;
|
|
19047
|
-
})).then((e => (this.
|
|
19048
|
-
return this.
|
|
19069
|
+
})).then((e => (this.ec = !1, e))))));
|
|
19070
|
+
return this.sc = t, t;
|
|
19049
19071
|
}
|
|
19050
19072
|
enqueueAfterDelay(e, t, n) {
|
|
19051
|
-
this.
|
|
19073
|
+
this.oc(),
|
|
19052
19074
|
// Fast-forward delays for timerIds that have been overridden.
|
|
19053
|
-
this.
|
|
19054
|
-
const r = DelayedOperation.createAndSchedule(this, e, t, n, (e => this.
|
|
19055
|
-
return this.
|
|
19075
|
+
this.nc.indexOf(e) > -1 && (t = 0);
|
|
19076
|
+
const r = DelayedOperation.createAndSchedule(this, e, t, n, (e => this.uc(e)));
|
|
19077
|
+
return this.Zu.push(r), r;
|
|
19056
19078
|
}
|
|
19057
|
-
|
|
19058
|
-
this.
|
|
19059
|
-
|
|
19079
|
+
oc() {
|
|
19080
|
+
this.Xu && fail(47125, {
|
|
19081
|
+
cc: __PRIVATE_getMessageOrStack(this.Xu)
|
|
19060
19082
|
});
|
|
19061
19083
|
}
|
|
19062
19084
|
verifyOperationInProgress() {}
|
|
19063
19085
|
/**
|
|
19064
19086
|
* Waits until all currently queued tasks are finished executing. Delayed
|
|
19065
19087
|
* operations are not run.
|
|
19066
|
-
*/ async
|
|
19088
|
+
*/ async lc() {
|
|
19067
19089
|
// Operations in the queue prior to draining may have enqueued additional
|
|
19068
19090
|
// operations. Keep draining the queue until the tail is no longer advanced,
|
|
19069
19091
|
// which indicates that no more new operations were enqueued and that all
|
|
19070
19092
|
// operations were executed.
|
|
19071
19093
|
let e;
|
|
19072
19094
|
do {
|
|
19073
|
-
e = this.
|
|
19074
|
-
} while (e !== this.
|
|
19095
|
+
e = this.sc, await e;
|
|
19096
|
+
} while (e !== this.sc);
|
|
19075
19097
|
}
|
|
19076
19098
|
/**
|
|
19077
19099
|
* For Tests: Determine if a delayed operation with a particular TimerId
|
|
19078
19100
|
* exists.
|
|
19079
|
-
*/
|
|
19080
|
-
for (const t of this.
|
|
19101
|
+
*/ hc(e) {
|
|
19102
|
+
for (const t of this.Zu) if (t.timerId === e) return !0;
|
|
19081
19103
|
return !1;
|
|
19082
19104
|
}
|
|
19083
19105
|
/**
|
|
@@ -19086,25 +19108,25 @@ class __PRIVATE_AsyncQueueImpl {
|
|
|
19086
19108
|
* @param lastTimerId - Delayed operations up to and including this TimerId
|
|
19087
19109
|
* will be drained. Pass TimerId.All to run all delayed operations.
|
|
19088
19110
|
* @returns a Promise that resolves once all operations have been run.
|
|
19089
|
-
*/
|
|
19111
|
+
*/ Pc(e) {
|
|
19090
19112
|
// Note that draining may generate more delayed ops, so we do that first.
|
|
19091
|
-
return this.
|
|
19113
|
+
return this.lc().then((() => {
|
|
19092
19114
|
// Run ops in the same order they'd run if they ran naturally.
|
|
19093
19115
|
/* eslint-disable-next-line @typescript-eslint/no-floating-promises */
|
|
19094
|
-
this.
|
|
19095
|
-
for (const t of this.
|
|
19096
|
-
return this.
|
|
19116
|
+
this.Zu.sort(((e, t) => e.targetTimeMs - t.targetTimeMs));
|
|
19117
|
+
for (const t of this.Zu) if (t.skipDelay(), "all" /* TimerId.All */ !== e && t.timerId === e) break;
|
|
19118
|
+
return this.lc();
|
|
19097
19119
|
}));
|
|
19098
19120
|
}
|
|
19099
19121
|
/**
|
|
19100
19122
|
* For Tests: Skip all subsequent delays for a timer id.
|
|
19101
|
-
*/
|
|
19102
|
-
this.
|
|
19123
|
+
*/ Tc(e) {
|
|
19124
|
+
this.nc.push(e);
|
|
19103
19125
|
}
|
|
19104
|
-
/** Called once a DelayedOperation is run or canceled. */
|
|
19126
|
+
/** Called once a DelayedOperation is run or canceled. */ uc(e) {
|
|
19105
19127
|
// NOTE: indexOf / slice are O(n), but delayedOperations is expected to be small.
|
|
19106
|
-
const t = this.
|
|
19107
|
-
/* eslint-disable-next-line @typescript-eslint/no-floating-promises */ this.
|
|
19128
|
+
const t = this.Zu.indexOf(e);
|
|
19129
|
+
/* eslint-disable-next-line @typescript-eslint/no-floating-promises */ this.Zu.splice(t, 1);
|
|
19108
19130
|
}
|
|
19109
19131
|
}
|
|
19110
19132
|
|
|
@@ -19955,7 +19977,7 @@ function __PRIVATE_isWrite(e) {
|
|
|
19955
19977
|
|
|
19956
19978
|
default:
|
|
19957
19979
|
throw fail(40011, {
|
|
19958
|
-
|
|
19980
|
+
Ic: e
|
|
19959
19981
|
});
|
|
19960
19982
|
}
|
|
19961
19983
|
}
|
|
@@ -19983,55 +20005,55 @@ function __PRIVATE_isWrite(e) {
|
|
|
19983
20005
|
this.settings = e, this.databaseId = t, this.serializer = n, this.ignoreUndefinedProperties = r,
|
|
19984
20006
|
// Minor hack: If fieldTransforms is undefined, we assume this is an
|
|
19985
20007
|
// external call and we need to validate the entire path.
|
|
19986
|
-
void 0 === i && this.
|
|
20008
|
+
void 0 === i && this.Ec(), this.fieldTransforms = i || [], this.fieldMask = s || [];
|
|
19987
20009
|
}
|
|
19988
20010
|
get path() {
|
|
19989
20011
|
return this.settings.path;
|
|
19990
20012
|
}
|
|
19991
|
-
get
|
|
19992
|
-
return this.settings.
|
|
20013
|
+
get Ic() {
|
|
20014
|
+
return this.settings.Ic;
|
|
19993
20015
|
}
|
|
19994
|
-
/** Returns a new context with the specified settings overwritten. */
|
|
20016
|
+
/** Returns a new context with the specified settings overwritten. */ dc(e) {
|
|
19995
20017
|
return new __PRIVATE_ParseContextImpl(Object.assign(Object.assign({}, this.settings), e), this.databaseId, this.serializer, this.ignoreUndefinedProperties, this.fieldTransforms, this.fieldMask);
|
|
19996
20018
|
}
|
|
19997
|
-
|
|
20019
|
+
Ac(e) {
|
|
19998
20020
|
var t;
|
|
19999
|
-
const n = null === (t = this.path) || void 0 === t ? void 0 : t.child(e), r = this.
|
|
20021
|
+
const n = null === (t = this.path) || void 0 === t ? void 0 : t.child(e), r = this.dc({
|
|
20000
20022
|
path: n,
|
|
20001
|
-
|
|
20023
|
+
Rc: !1
|
|
20002
20024
|
});
|
|
20003
|
-
return r.
|
|
20025
|
+
return r.Vc(e), r;
|
|
20004
20026
|
}
|
|
20005
|
-
|
|
20027
|
+
mc(e) {
|
|
20006
20028
|
var t;
|
|
20007
|
-
const n = null === (t = this.path) || void 0 === t ? void 0 : t.child(e), r = this.
|
|
20029
|
+
const n = null === (t = this.path) || void 0 === t ? void 0 : t.child(e), r = this.dc({
|
|
20008
20030
|
path: n,
|
|
20009
|
-
|
|
20031
|
+
Rc: !1
|
|
20010
20032
|
});
|
|
20011
|
-
return r.
|
|
20033
|
+
return r.Ec(), r;
|
|
20012
20034
|
}
|
|
20013
|
-
|
|
20035
|
+
fc(e) {
|
|
20014
20036
|
// TODO(b/34871131): We don't support array paths right now; so make path
|
|
20015
20037
|
// undefined.
|
|
20016
|
-
return this.
|
|
20038
|
+
return this.dc({
|
|
20017
20039
|
path: void 0,
|
|
20018
|
-
|
|
20040
|
+
Rc: !0
|
|
20019
20041
|
});
|
|
20020
20042
|
}
|
|
20021
|
-
|
|
20022
|
-
return __PRIVATE_createError(e, this.settings.methodName, this.settings.
|
|
20043
|
+
gc(e) {
|
|
20044
|
+
return __PRIVATE_createError(e, this.settings.methodName, this.settings.yc || !1, this.path, this.settings.wc);
|
|
20023
20045
|
}
|
|
20024
20046
|
/** Returns 'true' if 'fieldPath' was traversed when creating this context. */ contains(e) {
|
|
20025
20047
|
return void 0 !== this.fieldMask.find((t => e.isPrefixOf(t))) || void 0 !== this.fieldTransforms.find((t => e.isPrefixOf(t.field)));
|
|
20026
20048
|
}
|
|
20027
|
-
|
|
20049
|
+
Ec() {
|
|
20028
20050
|
// TODO(b/34871131): Remove null check once we have proper paths for fields
|
|
20029
20051
|
// within arrays.
|
|
20030
|
-
if (this.path) for (let e = 0; e < this.path.length; e++) this.
|
|
20052
|
+
if (this.path) for (let e = 0; e < this.path.length; e++) this.Vc(this.path.get(e));
|
|
20031
20053
|
}
|
|
20032
|
-
|
|
20033
|
-
if (0 === e.length) throw this.
|
|
20034
|
-
if (__PRIVATE_isWrite(this.
|
|
20054
|
+
Vc(e) {
|
|
20055
|
+
if (0 === e.length) throw this.gc("Document fields must not be empty");
|
|
20056
|
+
if (__PRIVATE_isWrite(this.Ic) && Pn.test(e)) throw this.gc('Document fields cannot begin and end with "__"');
|
|
20035
20057
|
}
|
|
20036
20058
|
}
|
|
20037
20059
|
|
|
@@ -20042,14 +20064,14 @@ function __PRIVATE_isWrite(e) {
|
|
|
20042
20064
|
constructor(e, t, n) {
|
|
20043
20065
|
this.databaseId = e, this.ignoreUndefinedProperties = t, this.serializer = n || __PRIVATE_newSerializer(e);
|
|
20044
20066
|
}
|
|
20045
|
-
/** Creates a new top-level parse context. */
|
|
20067
|
+
/** Creates a new top-level parse context. */ Sc(e, t, n, r = !1) {
|
|
20046
20068
|
return new __PRIVATE_ParseContextImpl({
|
|
20047
|
-
|
|
20069
|
+
Ic: e,
|
|
20048
20070
|
methodName: t,
|
|
20049
|
-
|
|
20071
|
+
wc: n,
|
|
20050
20072
|
path: FieldPath$1.emptyPath(),
|
|
20051
|
-
|
|
20052
|
-
|
|
20073
|
+
Rc: !1,
|
|
20074
|
+
yc: r
|
|
20053
20075
|
}, this.databaseId, this.serializer, this.ignoreUndefinedProperties);
|
|
20054
20076
|
}
|
|
20055
20077
|
}
|
|
@@ -20060,7 +20082,7 @@ function __PRIVATE_newUserDataReader(e) {
|
|
|
20060
20082
|
}
|
|
20061
20083
|
|
|
20062
20084
|
/** Parse document data from a set() call. */ function __PRIVATE_parseSetData(e, t, n, r, i, s = {}) {
|
|
20063
|
-
const o = e.
|
|
20085
|
+
const o = e.Sc(s.merge || s.mergeFields ? 2 /* UserDataSource.MergeSet */ : 0 /* UserDataSource.Set */ , t, n, i);
|
|
20064
20086
|
__PRIVATE_validatePlainObject("Data must be an object, but it was:", o, r);
|
|
20065
20087
|
const _ = __PRIVATE_parseObject(r, o);
|
|
20066
20088
|
let a, u;
|
|
@@ -20078,7 +20100,7 @@ function __PRIVATE_newUserDataReader(e) {
|
|
|
20078
20100
|
|
|
20079
20101
|
class __PRIVATE_DeleteFieldValueImpl extends FieldValue {
|
|
20080
20102
|
_toFieldTransform(e) {
|
|
20081
|
-
if (2 /* UserDataSource.MergeSet */ !== e.
|
|
20103
|
+
if (2 /* UserDataSource.MergeSet */ !== e.Ic) throw 1 /* UserDataSource.Update */ === e.Ic ? e.gc(`${this._methodName}() can only appear at the top level of your update data`) : e.gc(`${this._methodName}() cannot be used with set() unless you pass {merge:true}`);
|
|
20082
20104
|
// No transform to add for a delete, but we need to add it to our
|
|
20083
20105
|
// fieldMask so it gets deleted.
|
|
20084
20106
|
return e.fieldMask.push(e.path), null;
|
|
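The `__PRIVATE_DeleteFieldValueImpl` hunk above enforces that a field deletion is only accepted inside a merge-set or an update. A short sketch of the corresponding public usage (`setDoc` with `{ merge: true }` and `deleteField()`); the `users/alice` document is hypothetical:

```js
import { initializeApp } from "firebase/app";
import { getFirestore, doc, setDoc, deleteField } from "firebase/firestore";

const app = initializeApp({ /* your Firebase config (placeholder) */ });
const db = getFirestore(app);

const ref = doc(db, "users", "alice"); // hypothetical document

// merge:true turns the set() into a merge-set, the only set() mode in which
// deleteField() is accepted inside the data.
await setDoc(ref, { nickname: deleteField(), lastSeen: Date.now() }, { merge: true });
```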
@@ -20105,10 +20127,10 @@ class __PRIVATE_DeleteFieldValueImpl extends FieldValue {
|
|
|
20105
20127
|
* @param arrayElement - Whether or not the FieldValue has an array.
|
|
20106
20128
|
*/ function __PRIVATE_createSentinelChildContext(e, t, n) {
|
|
20107
20129
|
return new __PRIVATE_ParseContextImpl({
|
|
20108
|
-
|
|
20109
|
-
|
|
20130
|
+
Ic: 3 /* UserDataSource.Argument */ ,
|
|
20131
|
+
wc: t.settings.wc,
|
|
20110
20132
|
methodName: e._methodName,
|
|
20111
|
-
|
|
20133
|
+
Rc: n
|
|
20112
20134
|
}, t.databaseId, t.serializer, t.ignoreUndefinedProperties);
|
|
20113
20135
|
}
|
|
20114
20136
|
|
|
@@ -20123,47 +20145,47 @@ class __PRIVATE_ServerTimestampFieldValueImpl extends FieldValue {
|
|
|
20123
20145
|
|
|
20124
20146
|
class __PRIVATE_ArrayUnionFieldValueImpl extends FieldValue {
|
|
20125
20147
|
constructor(e, t) {
|
|
20126
|
-
super(e), this.
|
|
20148
|
+
super(e), this.bc = t;
|
|
20127
20149
|
}
|
|
20128
20150
|
_toFieldTransform(e) {
|
|
20129
20151
|
const t = __PRIVATE_createSentinelChildContext(this, e,
|
|
20130
|
-
/*array=*/ !0), n = this.
|
|
20152
|
+
/*array=*/ !0), n = this.bc.map((e => __PRIVATE_parseData(e, t))), r = new __PRIVATE_ArrayUnionTransformOperation(n);
|
|
20131
20153
|
return new FieldTransform(e.path, r);
|
|
20132
20154
|
}
|
|
20133
20155
|
isEqual(e) {
|
|
20134
|
-
return e instanceof __PRIVATE_ArrayUnionFieldValueImpl && V(this.
|
|
20156
|
+
return e instanceof __PRIVATE_ArrayUnionFieldValueImpl && V(this.bc, e.bc);
|
|
20135
20157
|
}
|
|
20136
20158
|
}
|
|
20137
20159
|
|
|
20138
20160
|
class __PRIVATE_ArrayRemoveFieldValueImpl extends FieldValue {
|
|
20139
20161
|
constructor(e, t) {
|
|
20140
|
-
super(e), this.
|
|
20162
|
+
super(e), this.bc = t;
|
|
20141
20163
|
}
|
|
20142
20164
|
_toFieldTransform(e) {
|
|
20143
20165
|
const t = __PRIVATE_createSentinelChildContext(this, e,
|
|
20144
|
-
/*array=*/ !0), n = this.
|
|
20166
|
+
/*array=*/ !0), n = this.bc.map((e => __PRIVATE_parseData(e, t))), r = new __PRIVATE_ArrayRemoveTransformOperation(n);
|
|
20145
20167
|
return new FieldTransform(e.path, r);
|
|
20146
20168
|
}
|
|
20147
20169
|
isEqual(e) {
|
|
20148
|
-
return e instanceof __PRIVATE_ArrayRemoveFieldValueImpl && V(this.
|
|
20170
|
+
return e instanceof __PRIVATE_ArrayRemoveFieldValueImpl && V(this.bc, e.bc);
|
|
20149
20171
|
}
|
|
20150
20172
|
}
|
|
20151
20173
|
|
|
20152
20174
|
class __PRIVATE_NumericIncrementFieldValueImpl extends FieldValue {
|
|
20153
20175
|
constructor(e, t) {
|
|
20154
|
-
super(e), this.
|
|
20176
|
+
super(e), this.Dc = t;
|
|
20155
20177
|
}
|
|
20156
20178
|
_toFieldTransform(e) {
|
|
20157
|
-
const t = new __PRIVATE_NumericIncrementTransformOperation(e.serializer, toNumber(e.serializer, this.
|
|
20179
|
+
const t = new __PRIVATE_NumericIncrementTransformOperation(e.serializer, toNumber(e.serializer, this.Dc));
|
|
20158
20180
|
return new FieldTransform(e.path, t);
|
|
20159
20181
|
}
|
|
20160
20182
|
isEqual(e) {
|
|
20161
|
-
return e instanceof __PRIVATE_NumericIncrementFieldValueImpl && this.
|
|
20183
|
+
return e instanceof __PRIVATE_NumericIncrementFieldValueImpl && this.Dc === e.Dc;
|
|
20162
20184
|
}
|
|
20163
20185
|
}
|
|
20164
20186
|
|
|
20165
20187
|
/** Parse update data from an update() call. */ function __PRIVATE_parseUpdateData(e, t, n, r) {
|
|
20166
|
-
const i = e.
|
|
20188
|
+
const i = e.Sc(1 /* UserDataSource.Update */ , t, n);
|
|
20167
20189
|
__PRIVATE_validatePlainObject("Data must be an object, but it was:", i, r);
|
|
20168
20190
|
const s = [], o = ObjectValue.empty();
|
|
20169
20191
|
forEach(r, ((e, r) => {
|
|
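The hunk above renames the fields backing the array-union, array-remove, and numeric-increment transforms. A minimal sketch of the public sentinels they implement, used through `updateDoc`; the `posts/post-1` document and its fields are hypothetical:

```js
import { initializeApp } from "firebase/app";
import {
  getFirestore, doc, updateDoc,
  arrayUnion, arrayRemove, increment, serverTimestamp,
} from "firebase/firestore";

const app = initializeApp({ /* your Firebase config (placeholder) */ });
const db = getFirestore(app);

const ref = doc(db, "posts", "post-1"); // hypothetical document

await updateDoc(ref, {
  tags: arrayUnion("firestore"),   // add without duplicating
  flags: arrayRemove("draft"),     // remove all matching elements
  likeCount: increment(1),         // server-side numeric transform
  updatedAt: serverTimestamp(),    // resolved by the backend
});
```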
@@ -20171,7 +20193,7 @@ class __PRIVATE_NumericIncrementFieldValueImpl extends FieldValue {
|
|
|
20171
20193
|
// For Compat types, we have to "extract" the underlying types before
|
|
20172
20194
|
// performing validation.
|
|
20173
20195
|
r = f(r);
|
|
20174
|
-
const a = i.
|
|
20196
|
+
const a = i.mc(_);
|
|
20175
20197
|
if (r instanceof __PRIVATE_DeleteFieldValueImpl)
|
|
20176
20198
|
// Add it to the field mask, but don't add anything to updateData.
|
|
20177
20199
|
s.push(_); else {
|
|
@@ -20184,7 +20206,7 @@ class __PRIVATE_NumericIncrementFieldValueImpl extends FieldValue {
|
|
|
20184
20206
|
}
|
|
20185
20207
|
|
|
20186
20208
|
/** Parse update data from a list of field/value arguments. */ function __PRIVATE_parseUpdateVarargs(e, t, n, r, i, s) {
|
|
20187
|
-
const o = e.
|
|
20209
|
+
const o = e.Sc(1 /* UserDataSource.Update */ , t, n), _ = [ __PRIVATE_fieldPathFromArgument$1(t, r, n) ], a = [ i ];
|
|
20188
20210
|
if (s.length % 2 != 0) throw new FirestoreError(L.INVALID_ARGUMENT, `Function ${t}() needs to be called with an even number of arguments that alternate between field names and values.`);
|
|
20189
20211
|
for (let e = 0; e < s.length; e += 2) _.push(__PRIVATE_fieldPathFromArgument$1(t, s[e])),
|
|
20190
20212
|
a.push(s[e + 1]);
|
|
@@ -20197,7 +20219,7 @@ class __PRIVATE_NumericIncrementFieldValueImpl extends FieldValue {
|
|
|
20197
20219
|
// For Compat types, we have to "extract" the underlying types before
|
|
20198
20220
|
// performing validation.
|
|
20199
20221
|
n = f(n);
|
|
20200
|
-
const r = o.
|
|
20222
|
+
const r = o.mc(t);
|
|
20201
20223
|
if (n instanceof __PRIVATE_DeleteFieldValueImpl)
|
|
20202
20224
|
// Add it to the field mask, but don't add anything to updateData.
|
|
20203
20225
|
u.push(t); else {
|
|
@@ -20216,7 +20238,7 @@ class __PRIVATE_NumericIncrementFieldValueImpl extends FieldValue {
|
|
|
20216
20238
|
* @param allowArrays - Whether the query value is an array that may directly
|
|
20217
20239
|
* contain additional arrays (e.g. the operand of an `in` query).
|
|
20218
20240
|
*/ function __PRIVATE_parseQueryValue(e, t, n, r = !1) {
|
|
20219
|
-
return __PRIVATE_parseData(n, e.
|
|
20241
|
+
return __PRIVATE_parseData(n, e.Sc(r ? 4 /* UserDataSource.ArrayArgument */ : 3 /* UserDataSource.Argument */ , t));
|
|
20220
20242
|
}
|
|
20221
20243
|
|
|
20222
20244
|
/**
|
|
@@ -20245,8 +20267,8 @@ class __PRIVATE_NumericIncrementFieldValueImpl extends FieldValue {
|
|
|
20245
20267
|
*/
|
|
20246
20268
|
return function __PRIVATE_parseSentinelFieldValue(e, t) {
|
|
20247
20269
|
// Sentinels are only supported with writes, and not within arrays.
|
|
20248
|
-
if (!__PRIVATE_isWrite(t.
|
|
20249
|
-
if (!t.path) throw t.
|
|
20270
|
+
if (!__PRIVATE_isWrite(t.Ic)) throw t.gc(`${e._methodName}() can only be used with update() and set()`);
|
|
20271
|
+
if (!t.path) throw t.gc(`${e._methodName}() is not currently supported inside arrays`);
|
|
20250
20272
|
const n = e._toFieldTransform(t);
|
|
20251
20273
|
n && t.fieldTransforms.push(n);
|
|
20252
20274
|
}
|
|
@@ -20270,12 +20292,12 @@ class __PRIVATE_NumericIncrementFieldValueImpl extends FieldValue {
|
|
|
20270
20292
|
// the set of values to be included for the IN query) that may directly
|
|
20271
20293
|
// contain additional arrays (each representing an individual field
|
|
20272
20294
|
// value), so we disable this validation.
|
|
20273
|
-
if (t.settings.
|
|
20295
|
+
if (t.settings.Rc && 4 /* UserDataSource.ArrayArgument */ !== t.Ic) throw t.gc("Nested arrays are not supported");
|
|
20274
20296
|
return function __PRIVATE_parseArray(e, t) {
|
|
20275
20297
|
const n = [];
|
|
20276
20298
|
let r = 0;
|
|
20277
20299
|
for (const i of e) {
|
|
20278
|
-
let e = __PRIVATE_parseData(i, t.
|
|
20300
|
+
let e = __PRIVATE_parseData(i, t.fc(r));
|
|
20279
20301
|
null == e && (
|
|
20280
20302
|
// Just include nulls in the array for fields being replaced with a
|
|
20281
20303
|
// sentinel.
|
|
@@ -20327,7 +20349,7 @@ class __PRIVATE_NumericIncrementFieldValueImpl extends FieldValue {
|
|
|
20327
20349
|
};
|
|
20328
20350
|
if (e instanceof DocumentReference) {
|
|
20329
20351
|
const n = t.databaseId, r = e.firestore._databaseId;
|
|
20330
|
-
if (!r.isEqual(n)) throw t.
|
|
20352
|
+
if (!r.isEqual(n)) throw t.gc(`Document reference is for database ${r.projectId}/${r.database} but should be for database ${n.projectId}/${n.database}`);
|
|
20331
20353
|
return {
|
|
20332
20354
|
referenceValue: __PRIVATE_toResourceName(e.firestore._databaseId || t.databaseId, e._key.path)
|
|
20333
20355
|
};
|
|
@@ -20345,7 +20367,7 @@ class __PRIVATE_NumericIncrementFieldValueImpl extends FieldValue {
|
|
|
20345
20367
|
[dt]: {
|
|
20346
20368
|
arrayValue: {
|
|
20347
20369
|
values: e.toArray().map((e => {
|
|
20348
|
-
if ("number" != typeof e) throw t.
|
|
20370
|
+
if ("number" != typeof e) throw t.gc("VectorValues must only contain numeric values.");
|
|
20349
20371
|
return __PRIVATE_toDouble(t.serializer, e);
|
|
20350
20372
|
}))
|
|
20351
20373
|
}
|
|
@@ -20363,7 +20385,7 @@ class __PRIVATE_NumericIncrementFieldValueImpl extends FieldValue {
|
|
|
20363
20385
|
* GeoPoints, etc. are not considered to look like JSON objects since they map
|
|
20364
20386
|
* to specific FieldValue types other than ObjectValue.
|
|
20365
20387
|
*/ (e, t);
|
|
20366
|
-
throw t.
|
|
20388
|
+
throw t.gc(`Unsupported field value: ${__PRIVATE_valueDescription(e)}`);
|
|
20367
20389
|
}(e, t);
|
|
20368
20390
|
}
|
|
20369
20391
|
|
|
@@ -20373,7 +20395,7 @@ function __PRIVATE_parseObject(e, t) {
|
|
|
20373
20395
|
// If we encounter an empty object, we explicitly add it to the update
|
|
20374
20396
|
// mask to ensure that the server creates a map entry.
|
|
20375
20397
|
t.path && t.path.length > 0 && t.fieldMask.push(t.path) : forEach(e, ((e, r) => {
|
|
20376
|
-
const i = __PRIVATE_parseData(r, t.
|
|
20398
|
+
const i = __PRIVATE_parseData(r, t.Ac(e));
|
|
20377
20399
|
null != i && (n[e] = i);
|
|
20378
20400
|
})), {
|
|
20379
20401
|
mapValue: {
|
|
@@ -20391,7 +20413,7 @@ function __PRIVATE_validatePlainObject(e, t, n) {
|
|
|
20391
20413
|
return "object" == typeof e && null !== e && (Object.getPrototypeOf(e) === Object.prototype || null === Object.getPrototypeOf(e));
|
|
20392
20414
|
}(n)) {
|
|
20393
20415
|
const r = __PRIVATE_valueDescription(n);
|
|
20394
|
-
throw "an object" === r ? t.
|
|
20416
|
+
throw "an object" === r ? t.gc(e + " a custom object") : t.gc(e + " " + r);
|
|
20395
20417
|
}
|
|
20396
20418
|
}
|
|
20397
20419
|
|
|
@@ -21696,7 +21718,7 @@ function onSnapshot(e, ...t) {
|
|
|
21696
21718
|
const i = new __PRIVATE_AsyncObserver(r), s = new __PRIVATE_QueryListener(t, i, n);
|
|
21697
21719
|
return e.asyncQueue.enqueueAndForget((async () => __PRIVATE_eventManagerListen(await __PRIVATE_getEventManager(e), s))),
|
|
21698
21720
|
() => {
|
|
21699
|
-
i.
|
|
21721
|
+
i.xu(), e.asyncQueue.enqueueAndForget((async () => __PRIVATE_eventManagerUnlisten(await __PRIVATE_getEventManager(e), s)));
|
|
21700
21722
|
};
|
|
21701
21723
|
}(ensureFirestoreConfigured(u), c, _, a);
|
|
21702
21724
|
}
|
|
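The `onSnapshot` hunk above shows the listener being muted before it is removed from the async queue on unsubscribe. A hedged sketch of the public listener API involved; the `messages` query and field names are hypothetical:

```js
import { initializeApp } from "firebase/app";
import {
  getFirestore, collection, query, orderBy, limit, onSnapshot,
} from "firebase/firestore";

const app = initializeApp({ /* your Firebase config (placeholder) */ });
const db = getFirestore(app);

const q = query(collection(db, "messages"), orderBy("sentAt", "desc"), limit(20)); // hypothetical data

const unsubscribe = onSnapshot(
  q,
  { includeMetadataChanges: true },
  (snapshot) => {
    snapshot.docChanges().forEach((change) => console.log(change.type, change.doc.id));
  },
  (error) => console.error("listen failed:", error)
);

// Detaching mutes the observer first, then removes the listener from the queue.
unsubscribe();
```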
@@ -22205,7 +22227,7 @@ function __PRIVATE_validateReference(e, t) {
|
|
|
22205
22227
|
const r = new __PRIVATE_Deferred;
|
|
22206
22228
|
return e.asyncQueue.enqueueAndForget((async () => {
|
|
22207
22229
|
const i = await __PRIVATE_getDatastore(e);
|
|
22208
|
-
new __PRIVATE_TransactionRunner(e.asyncQueue, i, n, t, r)
|
|
22230
|
+
new __PRIVATE_TransactionRunner(e.asyncQueue, i, n, t, r).Wu();
|
|
22209
22231
|
})), r.promise;
|
|
22210
22232
|
}(ensureFirestoreConfigured(e), (n => t(new Transaction(e, n))), r);
|
|
22211
22233
|
}
|
|
@@ -22572,7 +22594,7 @@ function _internalQueryToProtoQueryTarget(e) {
|
|
|
22572
22594
|
* The implementation of `TestingHooksSpi`.
|
|
22573
22595
|
*/ class __PRIVATE_TestingHooksSpiImpl {
|
|
22574
22596
|
constructor() {
|
|
22575
|
-
this.
|
|
22597
|
+
this.vc = new Map;
|
|
22576
22598
|
}
|
|
22577
22599
|
static get instance() {
|
|
22578
22600
|
return dn || (dn = new __PRIVATE_TestingHooksSpiImpl, function __PRIVATE_setTestingHooksSpi(e) {
|
|
@@ -22581,10 +22603,10 @@ function _internalQueryToProtoQueryTarget(e) {
|
|
|
22581
22603
|
}(dn)), dn;
|
|
22582
22604
|
}
|
|
22583
22605
|
ht(e) {
|
|
22584
|
-
this.
|
|
22606
|
+
this.vc.forEach((t => t(e)));
|
|
22585
22607
|
}
|
|
22586
22608
|
onExistenceFilterMismatch(e) {
|
|
22587
|
-
const t = Symbol(), n = this.
|
|
22609
|
+
const t = Symbol(), n = this.vc;
|
|
22588
22610
|
return n.set(t, e), () => n.delete(t);
|
|
22589
22611
|
}
|
|
22590
22612
|
}
|