@dxos/echo-pipeline 0.6.4 → 0.6.5-staging.097cf0c
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/lib/browser/{chunk-6MJEONOX.mjs → chunk-2MII6KJX.mjs} +464 -104
- package/dist/lib/browser/chunk-2MII6KJX.mjs.map +7 -0
- package/dist/lib/browser/index.mjs +3 -1
- package/dist/lib/browser/meta.json +1 -1
- package/dist/lib/browser/testing/index.mjs +1 -1
- package/dist/lib/node/{chunk-PT5LWMPA.cjs → chunk-6MWU4MHX.cjs} +475 -116
- package/dist/lib/node/chunk-6MWU4MHX.cjs.map +7 -0
- package/dist/lib/node/index.cjs +36 -34
- package/dist/lib/node/index.cjs.map +2 -2
- package/dist/lib/node/meta.json +1 -1
- package/dist/lib/node/testing/index.cjs +11 -11
- package/dist/types/src/automerge/automerge-host.d.ts +3 -2
- package/dist/types/src/automerge/automerge-host.d.ts.map +1 -1
- package/dist/types/src/automerge/echo-data-monitor.d.ts +73 -0
- package/dist/types/src/automerge/echo-data-monitor.d.ts.map +1 -0
- package/dist/types/src/automerge/echo-data-monitor.test.d.ts +2 -0
- package/dist/types/src/automerge/echo-data-monitor.test.d.ts.map +1 -0
- package/dist/types/src/automerge/echo-network-adapter.d.ts +9 -0
- package/dist/types/src/automerge/echo-network-adapter.d.ts.map +1 -1
- package/dist/types/src/automerge/index.d.ts +1 -0
- package/dist/types/src/automerge/index.d.ts.map +1 -1
- package/dist/types/src/automerge/leveldb-storage-adapter.d.ts +7 -0
- package/dist/types/src/automerge/leveldb-storage-adapter.d.ts.map +1 -1
- package/dist/types/src/space/space.d.ts +0 -1
- package/dist/types/src/space/space.d.ts.map +1 -1
- package/package.json +41 -39
- package/src/automerge/automerge-host.ts +11 -34
- package/src/automerge/echo-data-monitor.test.ts +55 -0
- package/src/automerge/echo-data-monitor.ts +374 -0
- package/src/automerge/echo-network-adapter.ts +37 -13
- package/src/automerge/index.ts +1 -0
- package/src/automerge/leveldb-storage-adapter.ts +19 -1
- package/src/space/space.ts +1 -3
- package/dist/lib/browser/chunk-6MJEONOX.mjs.map +0 -7
- package/dist/lib/node/chunk-PT5LWMPA.cjs.map +0 -7
|
@@ -0,0 +1,374 @@
|
|
|
1
|
+
//
|
|
2
|
+
// Copyright 2024 DXOS.org
|
|
3
|
+
//
|
|
4
|
+
|
|
5
|
+
import { type Message } from '@dxos/automerge/automerge-repo';
|
|
6
|
+
import { type TimeAware, trace } from '@dxos/tracing';
|
|
7
|
+
import { CircularBuffer, mapValues, SlidingWindowSummary, type SlidingWindowSummaryConfig } from '@dxos/util';
|
|
8
|
+
|
|
9
|
+
import { type NetworkDataMonitor } from './echo-network-adapter';
|
|
10
|
+
import { type StorageAdapterDataMonitor } from './leveldb-storage-adapter';
|
|
11
|
+
import { isCollectionQueryMessage, isCollectionStateMessage } from './network-protocol';
|
|
12
|
+
|
|
13
|
+
// Number of one-second buckets averaged to produce per-second rates.
const PER_SECOND_RATE_AVG_WINDOW_SIZE = 5;
// Default number of data points for sliding-window size/duration averages.
const DEFAULT_AVG_WINDOW_SIZE = 25;
|
|
15
|
+
|
|
16
|
+
export type EchoDataMonitorOptions = {
  // Maximum number of per-tick samples retained in each time series (older samples are dropped).
  timeSeriesLength: number;
};
|
|
19
|
+
|
|
20
|
+
@trace.resource()
|
|
21
|
+
export class EchoDataMonitor implements StorageAdapterDataMonitor, NetworkDataMonitor, TimeAware {
|
|
22
|
+
private _lastTick = 0;
|
|
23
|
+
|
|
24
|
+
private _activeCounters = createLocalCounters();
|
|
25
|
+
private _lastCompleteCounters: LocalCounters | undefined;
|
|
26
|
+
private readonly _localTimeSeries = createLocalTimeSeries();
|
|
27
|
+
private readonly _storageAverages = createStorageAverages();
|
|
28
|
+
private readonly _replicationAverages = createNetworkAverages();
|
|
29
|
+
private readonly _sizeByMessageType: { [type: string]: SlidingWindowSummary } = {};
|
|
30
|
+
private readonly _lastReceivedMessages = new CircularBuffer<StoredMessage>(100);
|
|
31
|
+
private readonly _lastSentMessages = new CircularBuffer<StoredMessage>(100);
|
|
32
|
+
|
|
33
|
+
private _connectionsCount = 0;
|
|
34
|
+
|
|
35
|
+
constructor(private readonly _params: EchoDataMonitorOptions = { timeSeriesLength: 30 }) {}
|
|
36
|
+
|
|
37
|
+
public tick(timeMs: number) {
|
|
38
|
+
this._advanceTimeWindow(timeMs - this._lastTick);
|
|
39
|
+
this._lastTick = timeMs;
|
|
40
|
+
}
|
|
41
|
+
|
|
42
|
+
public computeStats(): EchoDataStats {
|
|
43
|
+
return {
|
|
44
|
+
meta: {
|
|
45
|
+
rateAverageOverSeconds: PER_SECOND_RATE_AVG_WINDOW_SIZE,
|
|
46
|
+
},
|
|
47
|
+
storage: {
|
|
48
|
+
reads: {
|
|
49
|
+
payloadSize: this._storageAverages.loadedChunkSize.average(),
|
|
50
|
+
opDuration: this._storageAverages.loadDuration.average(),
|
|
51
|
+
countPerSecond: this._storageAverages.loadsPerSecond.average(),
|
|
52
|
+
},
|
|
53
|
+
writes: {
|
|
54
|
+
payloadSize: this._storageAverages.storedChunkSize.average(),
|
|
55
|
+
opDuration: this._storageAverages.storeDuration.average(),
|
|
56
|
+
countPerSecond: this._storageAverages.storesPerSecond.average(),
|
|
57
|
+
},
|
|
58
|
+
},
|
|
59
|
+
replicator: {
|
|
60
|
+
connections: this._connectionsCount,
|
|
61
|
+
receivedMessages: {
|
|
62
|
+
payloadSize: this._replicationAverages.receivedMessageSize.average(),
|
|
63
|
+
countPerSecond: this._replicationAverages.receivedPerSecond.average(),
|
|
64
|
+
},
|
|
65
|
+
sentMessages: {
|
|
66
|
+
payloadSize: this._replicationAverages.sentMessageSize.average(),
|
|
67
|
+
opDuration: this._replicationAverages.sendDuration.average(),
|
|
68
|
+
countPerSecond: this._replicationAverages.sentPerSecond.average(),
|
|
69
|
+
failedPerSecond: this._replicationAverages.sendsFailedPerSecond.average(),
|
|
70
|
+
},
|
|
71
|
+
countByMessageType: this._computeMessageHistogram('type'),
|
|
72
|
+
avgSizeByMessageType: mapValues(this._sizeByMessageType, (summary) => summary.average()),
|
|
73
|
+
},
|
|
74
|
+
};
|
|
75
|
+
}
|
|
76
|
+
|
|
77
|
+
public get connectionsCount() {
|
|
78
|
+
return this._connectionsCount;
|
|
79
|
+
}
|
|
80
|
+
|
|
81
|
+
/**
|
|
82
|
+
* @internal
|
|
83
|
+
*/
|
|
84
|
+
get lastPerSecondStats() {
|
|
85
|
+
return this._lastCompleteCounters;
|
|
86
|
+
}
|
|
87
|
+
|
|
88
|
+
/**
|
|
89
|
+
* @internal
|
|
90
|
+
*/
|
|
91
|
+
get timeSeries() {
|
|
92
|
+
return { ...this._localTimeSeries.storage, ...this._localTimeSeries.replication };
|
|
93
|
+
}
|
|
94
|
+
|
|
95
|
+
/**
|
|
96
|
+
* @internal
|
|
97
|
+
*/
|
|
98
|
+
get messagesByPeerId() {
|
|
99
|
+
return this._computeMessageHistogram('peerId');
|
|
100
|
+
}
|
|
101
|
+
|
|
102
|
+
private _advanceTimeWindow(millisPassed: number) {
|
|
103
|
+
const oldMetrics = Object.freeze(this._activeCounters);
|
|
104
|
+
this._activeCounters = createLocalCounters();
|
|
105
|
+
this._lastCompleteCounters = oldMetrics;
|
|
106
|
+
for (const peerId of Object.keys(oldMetrics.byPeerId)) {
|
|
107
|
+
this._activeCounters.byPeerId[peerId] = createMessageCounter();
|
|
108
|
+
}
|
|
109
|
+
this._addToTimeSeries(oldMetrics.replication, this._localTimeSeries.replication);
|
|
110
|
+
this._addToTimeSeries(oldMetrics.storage, this._localTimeSeries.storage);
|
|
111
|
+
// Prevent skewed measurements of incomplete buckets / after CPU freezes.
|
|
112
|
+
if (Math.abs(millisPassed - 1000) < 100) {
|
|
113
|
+
this._reportPerSecondRate(oldMetrics);
|
|
114
|
+
}
|
|
115
|
+
}
|
|
116
|
+
|
|
117
|
+
private _addToTimeSeries<T extends object>(values: T, timeSeries: TimeSeries<T>) {
|
|
118
|
+
for (const [key, value] of Object.entries(values)) {
|
|
119
|
+
const values: (typeof value)[] = (timeSeries as any)[key];
|
|
120
|
+
values.push(value);
|
|
121
|
+
if (values.length > this._params.timeSeriesLength) {
|
|
122
|
+
values.shift();
|
|
123
|
+
}
|
|
124
|
+
}
|
|
125
|
+
}
|
|
126
|
+
|
|
127
|
+
private _reportPerSecondRate(metrics: LocalCounters) {
|
|
128
|
+
const toReport: [string, number, SlidingWindowSummary][] = [
|
|
129
|
+
['storage.load', metrics.storage.loadedChunks, this._storageAverages.loadsPerSecond],
|
|
130
|
+
['storage.store', metrics.storage.storedChunks, this._storageAverages.storesPerSecond],
|
|
131
|
+
['network.receive', metrics.replication.received, this._replicationAverages.receivedPerSecond],
|
|
132
|
+
['network.send', metrics.replication.sent, this._replicationAverages.sentPerSecond],
|
|
133
|
+
];
|
|
134
|
+
for (const [metricName, metric, summary] of toReport) {
|
|
135
|
+
summary.record(metric);
|
|
136
|
+
if (metric > 0) {
|
|
137
|
+
trace.metrics.distribution(`dxos.echo.${metricName}-rate`, metric);
|
|
138
|
+
trace.metrics.increment(`dxos.echo.${metricName}`, 1, { tags: { status: 'busy' } });
|
|
139
|
+
} else {
|
|
140
|
+
trace.metrics.increment(`dxos.echo.${metricName}`, 1, { tags: { status: 'idle' } });
|
|
141
|
+
}
|
|
142
|
+
}
|
|
143
|
+
this._replicationAverages.sendsFailedPerSecond.record(metrics.replication.failed);
|
|
144
|
+
}
|
|
145
|
+
|
|
146
|
+
public recordPeerConnected(peerId: string) {
|
|
147
|
+
this._activeCounters.byPeerId[peerId] = createMessageCounter();
|
|
148
|
+
this._connectionsCount++;
|
|
149
|
+
}
|
|
150
|
+
|
|
151
|
+
public recordPeerDisconnected(peerId: string) {
|
|
152
|
+
this._connectionsCount--;
|
|
153
|
+
delete this._activeCounters.byPeerId[peerId];
|
|
154
|
+
}
|
|
155
|
+
|
|
156
|
+
public recordBytesStored(count: number) {
|
|
157
|
+
this._activeCounters.storage.storedChunks++;
|
|
158
|
+
this._activeCounters.storage.storedBytes += count;
|
|
159
|
+
this._storageAverages.storedChunkSize.record(count);
|
|
160
|
+
trace.metrics.distribution('dxos.echo.storage.bytes-stored', count, { unit: 'bytes' });
|
|
161
|
+
}
|
|
162
|
+
|
|
163
|
+
public recordLoadDuration(durationMs: number): void {
|
|
164
|
+
this._storageAverages.loadDuration.record(durationMs);
|
|
165
|
+
}
|
|
166
|
+
|
|
167
|
+
public recordStoreDuration(durationMs: number): void {
|
|
168
|
+
this._storageAverages.storeDuration.record(durationMs);
|
|
169
|
+
}
|
|
170
|
+
|
|
171
|
+
public recordBytesLoaded(count: number) {
|
|
172
|
+
this._activeCounters.storage.loadedChunks++;
|
|
173
|
+
this._activeCounters.storage.loadedBytes += count;
|
|
174
|
+
this._storageAverages.loadedChunkSize.record(count);
|
|
175
|
+
trace.metrics.distribution('dxos.echo.storage.bytes-loaded', count, { unit: 'bytes' });
|
|
176
|
+
}
|
|
177
|
+
|
|
178
|
+
public recordMessageSent(message: Message, duration: number) {
|
|
179
|
+
let metricsGroupName;
|
|
180
|
+
const bytes = getByteCount(message);
|
|
181
|
+
const tags = { type: message.type };
|
|
182
|
+
if (isAutomergeProtocolMessage(message)) {
|
|
183
|
+
this._activeCounters.replication.sent++;
|
|
184
|
+
this._replicationAverages.sendDuration.record(duration);
|
|
185
|
+
this._replicationAverages.sentMessageSize.record(bytes);
|
|
186
|
+
metricsGroupName = 'replication';
|
|
187
|
+
} else {
|
|
188
|
+
metricsGroupName = 'collection-sync';
|
|
189
|
+
}
|
|
190
|
+
trace.metrics.distribution(`dxos.echo.${metricsGroupName}.bytes-sent`, bytes, { unit: 'bytes', tags });
|
|
191
|
+
trace.metrics.distribution(`dxos.echo.${metricsGroupName}.send-duration`, duration, { unit: 'millisecond', tags });
|
|
192
|
+
trace.metrics.increment(`dxos.echo.${metricsGroupName}.send-status`, 1, { tags: { ...tags, success: true } });
|
|
193
|
+
const { messageSize, messageCounts } = this._getStatsForType(message);
|
|
194
|
+
messageSize.record(bytes);
|
|
195
|
+
messageCounts.sent++;
|
|
196
|
+
this._lastSentMessages.push({ type: message.type, peerId: message.targetId });
|
|
197
|
+
}
|
|
198
|
+
|
|
199
|
+
public recordMessageReceived(message: Message) {
|
|
200
|
+
const bytes = getByteCount(message);
|
|
201
|
+
const tags = { type: message.type };
|
|
202
|
+
if (isAutomergeProtocolMessage(message)) {
|
|
203
|
+
this._activeCounters.replication.received++;
|
|
204
|
+
this._replicationAverages.receivedMessageSize.record(bytes);
|
|
205
|
+
trace.metrics.distribution('dxos.echo.replication.bytes-received', bytes, { unit: 'bytes', tags });
|
|
206
|
+
} else {
|
|
207
|
+
trace.metrics.distribution('dxos.echo.collection-sync.bytes-received', bytes, { unit: 'bytes', tags });
|
|
208
|
+
}
|
|
209
|
+
const { messageSize, messageCounts } = this._getStatsForType(message);
|
|
210
|
+
messageSize.record(bytes);
|
|
211
|
+
messageCounts.received++;
|
|
212
|
+
this._lastReceivedMessages.push({ type: message.type, peerId: message.senderId });
|
|
213
|
+
}
|
|
214
|
+
|
|
215
|
+
public recordMessageSendingFailed(message: Message) {
|
|
216
|
+
const tags = { type: message.type, success: false };
|
|
217
|
+
if (isAutomergeProtocolMessage(message)) {
|
|
218
|
+
this._activeCounters.replication.failed++;
|
|
219
|
+
trace.metrics.increment('dxos.echo.replication.send-status', 1, { unit: 'bytes', tags });
|
|
220
|
+
} else {
|
|
221
|
+
trace.metrics.increment('dxos.echo.collection-sync.send-status', 1, { unit: 'bytes', tags });
|
|
222
|
+
}
|
|
223
|
+
const { messageCounts } = this._getStatsForType(message);
|
|
224
|
+
messageCounts.failed++;
|
|
225
|
+
}
|
|
226
|
+
|
|
227
|
+
private _getStatsForType(message: Message) {
|
|
228
|
+
const messageSize = (this._sizeByMessageType[message.type] ??= createSlidingWindow());
|
|
229
|
+
const messageCounts = (this._activeCounters.byType[message.type] ??= createMessageCounter());
|
|
230
|
+
return { messageCounts, messageSize };
|
|
231
|
+
}
|
|
232
|
+
|
|
233
|
+
private _computeMessageHistogram(groupKey: keyof StoredMessage): MessageAttributeHistogram {
|
|
234
|
+
const result: MessageAttributeHistogram = {};
|
|
235
|
+
for (const receivedMessage of this._lastReceivedMessages) {
|
|
236
|
+
const counters = (result[receivedMessage[groupKey]] ??= { received: 0, sent: 0 });
|
|
237
|
+
counters.received++;
|
|
238
|
+
}
|
|
239
|
+
for (const receivedMessage of this._lastSentMessages) {
|
|
240
|
+
const counters = (result[receivedMessage[groupKey]] ??= { received: 0, sent: 0 });
|
|
241
|
+
counters.sent++;
|
|
242
|
+
}
|
|
243
|
+
return result;
|
|
244
|
+
}
|
|
245
|
+
}
|
|
246
|
+
|
|
247
|
+
// Stats common to all data operations (sizes in bytes, rates per second).
type BaseDataOpStats = {
  payloadSize: number;
  countPerSecond: number;
};

// Data-op stats that additionally track operation latency (ms).
export type TimedDataOpStats = BaseDataOpStats & { opDuration: number };

// Maps each counter of T to an array of per-tick samples.
type TimeSeries<T extends object> = { [key in keyof T]: T[key][] };

// Raw storage counters accumulated within one tick window.
type StorageCounts = {
  storedChunks: number;
  storedBytes: number;
  loadedChunks: number;
  loadedBytes: number;
};
type StorageCountTimeSeries = TimeSeries<StorageCounts>;

// Raw message counters accumulated within one tick window.
type MessageCounts = {
  sent: number;
  received: number;
  failed: number;
};
type MessageCountTimeSeries = TimeSeries<MessageCounts>;

// Sent/received counts grouped by a message attribute (e.g. type or peer id).
type MessageAttributeHistogram = {
  [messageType: string]: {
    received: number;
    sent: number;
  };
};

// Aggregated snapshot produced by EchoDataMonitor.computeStats().
export type EchoDataStats = {
  meta: {
    rateAverageOverSeconds: number;
  };
  storage: {
    reads: TimedDataOpStats;
    writes: TimedDataOpStats;
  };
  replicator: {
    connections: number;
    receivedMessages: BaseDataOpStats;
    sentMessages: TimedDataOpStats & { failedPerSecond: number };
    avgSizeByMessageType: { [messageType: string]: number };
    countByMessageType: MessageAttributeHistogram;
  };
};

// Entry in the recent-message ring buffers.
type StoredMessage = { type: string; peerId: string };

// Sliding-window summaries for storage sizes, durations, and rates.
type StorageAverages = {
  storedChunkSize: SlidingWindowSummary;
  storesPerSecond: SlidingWindowSummary;
  loadedChunkSize: SlidingWindowSummary;
  loadsPerSecond: SlidingWindowSummary;
  loadDuration: SlidingWindowSummary;
  storeDuration: SlidingWindowSummary;
};

// Sliding-window summaries for replication sizes, durations, and rates.
type NetworkAverages = {
  receivedMessageSize: SlidingWindowSummary;
  receivedPerSecond: SlidingWindowSummary;
  sentMessageSize: SlidingWindowSummary;
  sentPerSecond: SlidingWindowSummary;
  sendDuration: SlidingWindowSummary;
  sendsFailedPerSecond: SlidingWindowSummary;
};

// All counters accumulated within a single tick window.
type LocalCounters = {
  storage: StorageCounts;
  replication: MessageCounts;
  byPeerId: { [peerId: string]: MessageCounts };
  byType: { [type: string]: MessageCounts };
};

// Per-tick sample history for storage and replication counters.
type LocalTimeSeries = {
  storage: StorageCountTimeSeries;
  replication: MessageCountTimeSeries;
};
|
|
326
|
+
|
|
327
|
+
const isAutomergeProtocolMessage = (message: Message) => {
|
|
328
|
+
return !(isCollectionQueryMessage(message) || isCollectionStateMessage(message));
|
|
329
|
+
};
|
|
330
|
+
|
|
331
|
+
const createSlidingWindow = (overrides?: SlidingWindowSummaryConfig) =>
|
|
332
|
+
new SlidingWindowSummary({ dataPoints: DEFAULT_AVG_WINDOW_SIZE, precision: 2, ...overrides });
|
|
333
|
+
|
|
334
|
+
const createLocalCounters = (): LocalCounters => ({
|
|
335
|
+
storage: { loadedBytes: 0, storedBytes: 0, storedChunks: 0, loadedChunks: 0 },
|
|
336
|
+
replication: createMessageCounter(),
|
|
337
|
+
byPeerId: {},
|
|
338
|
+
byType: {},
|
|
339
|
+
});
|
|
340
|
+
|
|
341
|
+
const createLocalTimeSeries = (): LocalTimeSeries => ({
|
|
342
|
+
storage: { loadedBytes: [], storedBytes: [], storedChunks: [], loadedChunks: [] },
|
|
343
|
+
replication: { sent: [], failed: [], received: [] },
|
|
344
|
+
});
|
|
345
|
+
|
|
346
|
+
// Fresh zeroed message counter bucket.
const createMessageCounter = (): MessageCounts => {
  return { sent: 0, received: 0, failed: 0 };
};
|
|
347
|
+
|
|
348
|
+
const createNetworkAverages = (): NetworkAverages => ({
|
|
349
|
+
receivedMessageSize: createSlidingWindow(),
|
|
350
|
+
sentMessageSize: createSlidingWindow(),
|
|
351
|
+
sendDuration: createSlidingWindow(),
|
|
352
|
+
receivedPerSecond: createSlidingWindow({ dataPoints: PER_SECOND_RATE_AVG_WINDOW_SIZE }),
|
|
353
|
+
sentPerSecond: createSlidingWindow({ dataPoints: PER_SECOND_RATE_AVG_WINDOW_SIZE }),
|
|
354
|
+
sendsFailedPerSecond: createSlidingWindow({ dataPoints: PER_SECOND_RATE_AVG_WINDOW_SIZE }),
|
|
355
|
+
});
|
|
356
|
+
|
|
357
|
+
const createStorageAverages = (): StorageAverages => ({
|
|
358
|
+
storedChunkSize: createSlidingWindow(),
|
|
359
|
+
loadedChunkSize: createSlidingWindow(),
|
|
360
|
+
loadDuration: createSlidingWindow(),
|
|
361
|
+
storeDuration: createSlidingWindow(),
|
|
362
|
+
loadsPerSecond: createSlidingWindow({ dataPoints: PER_SECOND_RATE_AVG_WINDOW_SIZE }),
|
|
363
|
+
storesPerSecond: createSlidingWindow({ dataPoints: PER_SECOND_RATE_AVG_WINDOW_SIZE }),
|
|
364
|
+
});
|
|
365
|
+
|
|
366
|
+
const getByteCount = (message: Message): number => {
|
|
367
|
+
return (
|
|
368
|
+
message.type.length +
|
|
369
|
+
message.senderId.length +
|
|
370
|
+
message.targetId.length +
|
|
371
|
+
(message.data?.byteLength ?? 0) +
|
|
372
|
+
(message.documentId?.length ?? 0)
|
|
373
|
+
);
|
|
374
|
+
};
|
|
@@ -23,10 +23,19 @@ import {
|
|
|
23
23
|
type CollectionStateMessage,
|
|
24
24
|
} from './network-protocol';
|
|
25
25
|
|
|
26
|
+
/**
 * Hooks invoked by the network adapter to report replication activity.
 * Implementations must be cheap — these are called on the hot send/receive path.
 */
export interface NetworkDataMonitor {
  recordPeerConnected(peerId: string): void;
  recordPeerDisconnected(peerId: string): void;
  /** @param duration Write duration in milliseconds. */
  recordMessageSent(message: Message, duration: number): void;
  recordMessageReceived(message: Message): void;
  recordMessageSendingFailed(message: Message): void;
}
|
|
33
|
+
|
|
26
34
|
export type EchoNetworkAdapterParams = {
|
|
27
35
|
getContainingSpaceForDocument: (documentId: string) => Promise<PublicKey | null>;
|
|
28
36
|
onCollectionStateQueried: (collectionId: string, peerId: PeerId) => void;
|
|
29
37
|
onCollectionStateReceived: (collectionId: string, peerId: PeerId, state: unknown) => void;
|
|
38
|
+
monitor?: NetworkDataMonitor;
|
|
30
39
|
};
|
|
31
40
|
|
|
32
41
|
/**
|
|
@@ -52,17 +61,7 @@ export class EchoNetworkAdapter extends NetworkAdapter {
|
|
|
52
61
|
}
|
|
53
62
|
|
|
54
63
|
// NetworkAdapter entry point; delegates to the shared monitored send path.
override send(message: Message): void {
  this._send(message);
}
|
|
67
66
|
|
|
68
67
|
override disconnect(): void {
|
|
@@ -149,7 +148,7 @@ export class EchoNetworkAdapter extends NetworkAdapter {
|
|
|
149
148
|
targetId,
|
|
150
149
|
collectionId,
|
|
151
150
|
};
|
|
152
|
-
this.
|
|
151
|
+
this._send(message);
|
|
153
152
|
}
|
|
154
153
|
|
|
155
154
|
sendCollectionState(collectionId: string, targetId: PeerId, state: unknown): void {
|
|
@@ -160,7 +159,29 @@ export class EchoNetworkAdapter extends NetworkAdapter {
|
|
|
160
159
|
collectionId,
|
|
161
160
|
state,
|
|
162
161
|
};
|
|
163
|
-
this.
|
|
162
|
+
this._send(message);
|
|
163
|
+
}
|
|
164
|
+
|
|
165
|
+
// Shared send path: resolves the connection for the target peer, writes the
// message, and reports success/failure (with duration) to the optional monitor.
private _send(message: Message) {
  const entry = this._connections.get(message.targetId);
  if (!entry) {
    throw new Error('Connection not found.');
  }

  const startedAt = Date.now();
  const onSent = () => {
    this._params.monitor?.recordMessageSent(message, Date.now() - startedAt);
  };
  const onFailed = (err: any) => {
    // Errors after the connection closed are expected; only log while open.
    if (entry.isOpen) {
      log.catch(err);
    }
    this._params.monitor?.recordMessageSendingFailed(message);
  };
  // TODO(dmaretskyi): Find a way to enforce backpressure on AM-repo.
  entry.writer.write(message).then(onSent).catch(onFailed);
}
|
|
165
186
|
|
|
166
187
|
// TODO(dmaretskyi): Remove.
|
|
@@ -202,6 +223,7 @@ export class EchoNetworkAdapter extends NetworkAdapter {
|
|
|
202
223
|
|
|
203
224
|
log('emit peer-candidate', { peerId: connection.peerId });
|
|
204
225
|
this._emitPeerCandidate(connection);
|
|
226
|
+
this._params.monitor?.recordPeerConnected(connection.peerId);
|
|
205
227
|
}
|
|
206
228
|
|
|
207
229
|
private _onMessage(message: Message) {
|
|
@@ -212,6 +234,7 @@ export class EchoNetworkAdapter extends NetworkAdapter {
|
|
|
212
234
|
} else {
|
|
213
235
|
this.emit('message', message);
|
|
214
236
|
}
|
|
237
|
+
this._params.monitor?.recordMessageReceived(message);
|
|
215
238
|
}
|
|
216
239
|
|
|
217
240
|
/**
|
|
@@ -233,6 +256,7 @@ export class EchoNetworkAdapter extends NetworkAdapter {
|
|
|
233
256
|
|
|
234
257
|
entry.isOpen = false;
|
|
235
258
|
this.emit('peer-disconnected', { peerId: connection.peerId as PeerId });
|
|
259
|
+
this._params.monitor?.recordPeerDisconnected(connection.peerId);
|
|
236
260
|
|
|
237
261
|
void entry.reader.cancel().catch((err) => log.catch(err));
|
|
238
262
|
void entry.writer.abort().catch((err) => log.catch(err));
|
package/src/automerge/index.ts
CHANGED
|
@@ -9,9 +9,17 @@ import { LifecycleState, Resource } from '@dxos/context';
|
|
|
9
9
|
import { type BatchLevel, type SublevelDB } from '@dxos/kv-store';
|
|
10
10
|
import { type MaybePromise } from '@dxos/util';
|
|
11
11
|
|
|
12
|
+
/**
 * Hooks invoked by the storage adapter to report load/store activity.
 * Byte counts are payload sizes; durations are in milliseconds.
 */
export interface StorageAdapterDataMonitor {
  recordBytesStored(count: number): void;
  recordBytesLoaded(count: number): void;
  recordLoadDuration(durationMs: number): void;
  recordStoreDuration(durationMs: number): void;
}
|
|
18
|
+
|
|
12
19
|
export type LevelDBStorageAdapterParams = {
|
|
13
20
|
db: SublevelDB;
|
|
14
21
|
callbacks?: StorageCallbacks;
|
|
22
|
+
monitor?: StorageAdapterDataMonitor;
|
|
15
23
|
};
|
|
16
24
|
|
|
17
25
|
export type BeforeSaveParams = { path: StorageKey; batch: BatchLevel };
|
|
@@ -32,7 +40,11 @@ export class LevelDBStorageAdapter extends Resource implements StorageAdapterInt
|
|
|
32
40
|
// TODO(mykola): this should be an error.
|
|
33
41
|
return undefined;
|
|
34
42
|
}
|
|
35
|
-
|
|
43
|
+
const startMs = Date.now();
|
|
44
|
+
const chunk = await this._params.db.get<StorageKey, Uint8Array>(keyArray, { ...encodingOptions });
|
|
45
|
+
this._params.monitor?.recordBytesLoaded(chunk.byteLength);
|
|
46
|
+
this._params.monitor?.recordLoadDuration(Date.now() - startMs);
|
|
47
|
+
return chunk;
|
|
36
48
|
} catch (err: any) {
|
|
37
49
|
if (isLevelDbNotFoundError(err)) {
|
|
38
50
|
return undefined;
|
|
@@ -45,6 +57,7 @@ export class LevelDBStorageAdapter extends Resource implements StorageAdapterInt
|
|
|
45
57
|
if (this._lifecycleState !== LifecycleState.OPEN) {
|
|
46
58
|
return undefined;
|
|
47
59
|
}
|
|
60
|
+
const startMs = Date.now();
|
|
48
61
|
const batch = this._params.db.batch();
|
|
49
62
|
|
|
50
63
|
await this._params.callbacks?.beforeSave?.({ path: keyArray, batch });
|
|
@@ -52,8 +65,10 @@ export class LevelDBStorageAdapter extends Resource implements StorageAdapterInt
|
|
|
52
65
|
...encodingOptions,
|
|
53
66
|
});
|
|
54
67
|
await batch.write();
|
|
68
|
+
this._params.monitor?.recordBytesStored(binary.byteLength);
|
|
55
69
|
|
|
56
70
|
await this._params.callbacks?.afterSave?.(keyArray);
|
|
71
|
+
this._params.monitor?.recordStoreDuration(Date.now() - startMs);
|
|
57
72
|
}
|
|
58
73
|
|
|
59
74
|
async remove(keyArray: StorageKey): Promise<void> {
|
|
@@ -67,6 +82,7 @@ export class LevelDBStorageAdapter extends Resource implements StorageAdapterInt
|
|
|
67
82
|
if (this._lifecycleState !== LifecycleState.OPEN) {
|
|
68
83
|
return [];
|
|
69
84
|
}
|
|
85
|
+
const startMs = Date.now();
|
|
70
86
|
const result: Chunk[] = [];
|
|
71
87
|
for await (const [key, value] of this._params.db.iterator<StorageKey, Uint8Array>({
|
|
72
88
|
gte: keyPrefix,
|
|
@@ -77,7 +93,9 @@ export class LevelDBStorageAdapter extends Resource implements StorageAdapterInt
|
|
|
77
93
|
key,
|
|
78
94
|
data: value,
|
|
79
95
|
});
|
|
96
|
+
this._params.monitor?.recordBytesLoaded(value.byteLength);
|
|
80
97
|
}
|
|
98
|
+
this._params.monitor?.recordLoadDuration(Date.now() - startMs);
|
|
81
99
|
return result;
|
|
82
100
|
}
|
|
83
101
|
|
package/src/space/space.ts
CHANGED
|
@@ -2,7 +2,7 @@
|
|
|
2
2
|
// Copyright 2022 DXOS.org
|
|
3
3
|
//
|
|
4
4
|
|
|
5
|
-
import { Event,
|
|
5
|
+
import { Event, synchronized, trackLeaks } from '@dxos/async';
|
|
6
6
|
import { type Context, LifecycleState, Resource } from '@dxos/context';
|
|
7
7
|
import { type DelegateInvitationCredential, type FeedInfo, type MemberInfo } from '@dxos/credentials';
|
|
8
8
|
import { subtleCrypto } from '@dxos/crypto';
|
|
@@ -54,8 +54,6 @@ export type CreatePipelineParams = {
|
|
|
54
54
|
@trackLeaks('open', 'close')
|
|
55
55
|
@trace.resource()
|
|
56
56
|
export class Space extends Resource {
|
|
57
|
-
private readonly _addFeedMutex = new Mutex();
|
|
58
|
-
|
|
59
57
|
public readonly onCredentialProcessed = new Callback<AsyncCallback<Credential>>();
|
|
60
58
|
public readonly stateUpdate = new Event();
|
|
61
59
|
@trace.info()
|