@graphql-yoga/plugin-apollo-usage-report 0.13.0 → 0.13.1-alpha-20260116132831-dc9fc0ad2f1ad6ee99bc438c173cf8496ae505a7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,23 @@
+ import { OurReport } from "./stats.cjs";
+ import { ApolloUsageReportOptions } from "./index.cjs";
+ import { YogaLogger, YogaServer } from "graphql-yoga";
+ import { google } from "@apollo/usage-reporting-protobuf";
+
+ //#region src/reporter.d.ts
+ declare class Reporter {
+ private yoga;
+ private logger;
+ private reportHeaders;
+ private options;
+ private reportsBySchema;
+ private nextSendAfterDelay?;
+ private sending;
+ constructor(options: ApolloUsageReportOptions, yoga: YogaServer<Record<string, unknown>, Record<string, unknown>>, logger: YogaLogger);
+ addTrace(schemaId: string, options: Parameters<OurReport['addTrace']>[0]): Promise<void> | undefined;
+ flush(): Promise<PromiseSettledResult<unknown>[]>;
+ sendReport(schemaId: string): Promise<void>;
+ private _sendReport;
+ private getReport;
+ }
+ //#endregion
+ export { Reporter };
@@ -0,0 +1,23 @@
+ import { OurReport } from "./stats.mjs";
+ import { ApolloUsageReportOptions } from "./index.mjs";
+ import { YogaLogger, YogaServer } from "graphql-yoga";
+ import { google } from "@apollo/usage-reporting-protobuf";
+
+ //#region src/reporter.d.ts
+ declare class Reporter {
+ private yoga;
+ private logger;
+ private reportHeaders;
+ private options;
+ private reportsBySchema;
+ private nextSendAfterDelay?;
+ private sending;
+ constructor(options: ApolloUsageReportOptions, yoga: YogaServer<Record<string, unknown>, Record<string, unknown>>, logger: YogaLogger);
+ addTrace(schemaId: string, options: Parameters<OurReport['addTrace']>[0]): Promise<void> | undefined;
+ flush(): Promise<PromiseSettledResult<unknown>[]>;
+ sendReport(schemaId: string): Promise<void>;
+ private _sendReport;
+ private getReport;
+ }
+ //#endregion
+ export { Reporter };
@@ -0,0 +1,117 @@
+ import { OurReport } from "./stats.mjs";
+ import { Report, ReportHeader, google } from "@apollo/usage-reporting-protobuf";
+
+ //#region src/reporter.ts
+ const DEFAULT_REPORTING_ENDPOINT = "https://usage-reporting.api.apollographql.com/api/ingress/traces";
+ var Reporter = class {
+ reportHeaders;
+ options;
+ reportsBySchema = {};
+ nextSendAfterDelay;
+ sending = [];
+ constructor(options, yoga, logger) {
+ this.yoga = yoga;
+ this.logger = logger;
+ this.options = {
+ ...options,
+ maxBatchDelay: options.maxBatchDelay ?? 2e4,
+ maxBatchUncompressedSize: options.maxBatchUncompressedSize ?? 4 * 1024 * 1024,
+ maxTraceSize: options.maxTraceSize ?? 10 * 1024 * 1024,
+ exportTimeout: options.exportTimeout ?? 3e4,
+ onError: options.onError ?? ((err) => this.logger.error("Failed to send report", err))
+ };
+ this.reportHeaders = {
+ graphRef: getGraphRef(options),
+ hostname: options.hostname ?? getEnvVar("HOSTNAME") ?? "",
+ uname: options.uname ?? "",
+ runtimeVersion: options.runtimeVersion ?? "",
+ agentVersion: options.agentVersion || `graphql-yoga@${yoga.version}`
+ };
+ }
+ addTrace(schemaId, options) {
+ const report = this.getReport(schemaId);
+ report.addTrace(options);
+ if (this.options.alwaysSend || report.sizeEstimator.bytes >= this.options.maxBatchUncompressedSize) return this._sendReport(schemaId);
+ this.nextSendAfterDelay ||= setTimeout(() => this.flush(), this.options.maxBatchDelay);
+ }
+ async flush() {
+ return Promise.allSettled([...this.sending, ...Object.keys(this.reportsBySchema).map((schemaId) => this._sendReport(schemaId))]);
+ }
+ async sendReport(schemaId) {
+ const sending = this._sendReport(schemaId);
+ this.sending.push(sending);
+ sending.finally(() => this.sending = this.sending?.filter((p) => p !== sending));
+ return sending;
+ }
+ async _sendReport(schemaId) {
+ const { fetchAPI: { fetch, CompressionStream, ReadableStream } } = this.yoga;
+ const report = this.reportsBySchema[schemaId];
+ if (!report) throw new Error(`No report to send for schema ${schemaId}`);
+ if (this.nextSendAfterDelay != null) {
+ clearTimeout(this.nextSendAfterDelay);
+ this.nextSendAfterDelay = void 0;
+ }
+ delete this.reportsBySchema[schemaId];
+ report.endTime = dateToProtoTimestamp(/* @__PURE__ */ new Date());
+ report.ensureCountsAreIntegers();
+ const validationError = Report.verify(report);
+ if (validationError) throw new TypeError(`Invalid report: ${validationError}`);
+ const { apiKey = getEnvVar("APOLLO_KEY"), endpoint = DEFAULT_REPORTING_ENDPOINT } = this.options;
+ const encodedReport = Report.encode(report).finish();
+ let lastError;
+ for (let tries = 0; tries < 5; tries++) try {
+ this.logger.debug(`Sending report (try ${tries}/5)`);
+ const response = await fetch(endpoint, {
+ method: "POST",
+ headers: {
+ "content-type": "application/protobuf",
+ "content-encoding": "gzip",
+ "x-api-key": apiKey,
+ accept: "application/json"
+ },
+ body: new ReadableStream({ start(controller) {
+ controller.enqueue(encodedReport);
+ controller.close();
+ } }).pipeThrough(new CompressionStream("gzip")),
+ signal: AbortSignal.timeout(this.options.exportTimeout)
+ });
+ const result = await response.text();
+ if (response.ok) {
+ this.logger.debug("Report sent:", result);
+ return;
+ }
+ throw result;
+ } catch (err) {
+ lastError = err;
+ this.logger.error("Failed to send report:", err);
+ }
+ this.options.onError(new Error("Failed to send traces after 5 tries", { cause: lastError }));
+ }
+ getReport(schemaId) {
+ const report = this.reportsBySchema[schemaId];
+ if (report) return report;
+ return this.reportsBySchema[schemaId] = new OurReport(new ReportHeader({
+ ...this.reportHeaders,
+ executableSchemaId: schemaId
+ }));
+ }
+ };
+ function getGraphRef(options) {
+ const graphRef = options.graphRef || getEnvVar("APOLLO_GRAPH_REF");
+ if (!graphRef) throw new Error("Missing GraphRef. Either provide `graphRef` option or `APOLLO_GRAPH_REF` environment variable");
+ return graphRef;
+ }
+ function getEnvVar(name, defaultValue) {
+ return globalThis.process?.env?.[name] || defaultValue || void 0;
+ }
+ function dateToProtoTimestamp(date) {
+ const totalMillis = date.getTime();
+ const millis = totalMillis % 1e3;
+ return new google.protobuf.Timestamp({
+ seconds: (totalMillis - millis) / 1e3,
+ nanos: millis * 1e6
+ });
+ }
+
+ //#endregion
+ export { Reporter, getEnvVar };
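
Illustrative sketch (not part of the published diff): the dateToProtoTimestamp helper above splits a Date's epoch milliseconds into whole seconds plus a nanosecond remainder before stamping the report's endTime. A standalone TypeScript re-derivation of that conversion, with a hypothetical sample value:

import { google } from "@apollo/usage-reporting-protobuf";

// Re-implementation of dateToProtoTimestamp, for illustration only.
function toProtoTimestamp(date: Date): google.protobuf.Timestamp {
  const totalMillis = date.getTime();      // e.g. 1_700_000_000_123 ms since epoch
  const millis = totalMillis % 1e3;        // 123 ms left over after whole seconds
  return new google.protobuf.Timestamp({
    seconds: (totalMillis - millis) / 1e3, // 1_700_000_000 s
    nanos: millis * 1e6                    // 123_000_000 ns
  });
}

// 1_700_000_000_123 ms -> { seconds: 1_700_000_000, nanos: 123_000_000 }
console.log(toProtoTimestamp(new Date(1_700_000_000_123)));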
package/dist/stats.cjs ADDED
@@ -0,0 +1,344 @@
+ let _apollo_usage_reporting_protobuf = require("@apollo/usage-reporting-protobuf");
+
+ //#region src/stats.ts
+ var SizeEstimator = class {
+ bytes = 0;
+ };
+ var OurReport = class {
+ tracesPreAggregated = false;
+ constructor(header) {
+ this.header = header;
+ }
+ tracesPerQuery = Object.create(null);
+ endTime = null;
+ operationCount = 0;
+ sizeEstimator = new SizeEstimator();
+ ensureCountsAreIntegers() {
+ for (const tracesAndStats of Object.values(this.tracesPerQuery)) tracesAndStats.ensureCountsAreIntegers();
+ }
+ addTrace({ statsReportKey, trace, asTrace, referencedFieldsByType, maxTraceBytes = 10 * 1024 * 1024, nonFtv1ErrorPaths }) {
+ const tracesAndStats = this.getTracesAndStats({
+ statsReportKey,
+ referencedFieldsByType
+ });
+ if (asTrace) {
+ const encodedTrace = _apollo_usage_reporting_protobuf.Trace.encode(trace).finish();
+ if (!isNaN(maxTraceBytes) && encodedTrace.length > maxTraceBytes) tracesAndStats.statsWithContext.addTrace(trace, this.sizeEstimator, nonFtv1ErrorPaths);
+ else {
+ tracesAndStats.trace.push(encodedTrace);
+ this.sizeEstimator.bytes += 2 + encodedTrace.length;
+ }
+ } else tracesAndStats.statsWithContext.addTrace(trace, this.sizeEstimator, nonFtv1ErrorPaths);
+ }
+ getTracesAndStats({ statsReportKey, referencedFieldsByType }) {
+ const existing = this.tracesPerQuery[statsReportKey];
+ if (existing) return existing;
+ this.sizeEstimator.bytes += estimatedBytesForString(statsReportKey);
+ for (const [typeName, referencedFieldsForType] of Object.entries(referencedFieldsByType)) {
+ this.sizeEstimator.bytes += 4;
+ if (referencedFieldsForType.isInterface) this.sizeEstimator.bytes += 2;
+ this.sizeEstimator.bytes += estimatedBytesForString(typeName);
+ for (const fieldName of referencedFieldsForType.fieldNames) this.sizeEstimator.bytes += estimatedBytesForString(fieldName);
+ }
+ return this.tracesPerQuery[statsReportKey] = new OurTracesAndStats(referencedFieldsByType);
+ }
+ };
+ var OurTracesAndStats = class {
+ constructor(referencedFieldsByType) {
+ this.referencedFieldsByType = referencedFieldsByType;
+ }
+ trace = [];
+ statsWithContext = new StatsByContext();
+ internalTracesContributingToStats = [];
+ ensureCountsAreIntegers() {
+ this.statsWithContext.ensureCountsAreIntegers();
+ }
+ };
+ var StatsByContext = class {
+ map = Object.create(null);
+ /**
+ * This function is used by the protobuf generator to convert this map into
+ * an array of contextualized stats to serialize
+ */
+ toArray() {
+ return Object.values(this.map);
+ }
+ ensureCountsAreIntegers() {
+ for (const contextualizedStats of Object.values(this.map)) contextualizedStats.ensureCountsAreIntegers();
+ }
+ addTrace(trace, sizeEstimator, nonFtv1ErrorPaths) {
+ this.getContextualizedStats(trace, sizeEstimator).addTrace(trace, sizeEstimator, nonFtv1ErrorPaths);
+ }
+ getContextualizedStats(trace, sizeEstimator) {
+ const statsContext = {
+ clientName: trace.clientName,
+ clientVersion: trace.clientVersion
+ };
+ const statsContextKey = JSON.stringify(statsContext);
+ const existing = this.map[statsContextKey];
+ if (existing) return existing;
+ sizeEstimator.bytes += 20 + estimatedBytesForString(trace.clientName) + estimatedBytesForString(trace.clientVersion);
+ const contextualizedStats = new OurContextualizedStats(statsContext);
+ this.map[statsContextKey] = contextualizedStats;
+ return contextualizedStats;
+ }
+ };
+ var OurContextualizedStats = class {
+ queryLatencyStats = new OurQueryLatencyStats();
+ perTypeStat = Object.create(null);
+ constructor(context) {
+ this.context = context;
+ }
+ ensureCountsAreIntegers() {
+ for (const typeStat of Object.values(this.perTypeStat)) typeStat.ensureCountsAreIntegers();
+ }
+ addTrace(trace, sizeEstimator, nonFtv1ErrorPaths = []) {
+ const { fieldExecutionWeight } = trace;
+ if (!fieldExecutionWeight) this.queryLatencyStats.requestsWithoutFieldInstrumentation++;
+ this.queryLatencyStats.requestCount++;
+ if (trace.fullQueryCacheHit) {
+ this.queryLatencyStats.cacheLatencyCount.incrementDuration(trace.durationNs);
+ this.queryLatencyStats.cacheHits++;
+ } else this.queryLatencyStats.latencyCount.incrementDuration(trace.durationNs);
+ if (!trace.fullQueryCacheHit && trace.cachePolicy?.maxAgeNs != null) switch (trace.cachePolicy.scope) {
+ case _apollo_usage_reporting_protobuf.Trace.CachePolicy.Scope.PRIVATE:
+ this.queryLatencyStats.privateCacheTtlCount.incrementDuration(trace.cachePolicy.maxAgeNs);
+ break;
+ case _apollo_usage_reporting_protobuf.Trace.CachePolicy.Scope.PUBLIC:
+ this.queryLatencyStats.publicCacheTtlCount.incrementDuration(trace.cachePolicy.maxAgeNs);
+ break;
+ }
+ if (trace.persistedQueryHit) this.queryLatencyStats.persistedQueryHits++;
+ if (trace.persistedQueryRegister) this.queryLatencyStats.persistedQueryMisses++;
+ if (trace.forbiddenOperation) this.queryLatencyStats.forbiddenOperationCount++;
+ if (trace.registeredOperation) this.queryLatencyStats.registeredOperationCount++;
+ let hasError = false;
+ const errorPathStats = /* @__PURE__ */ new Set();
+ const traceNodeStats = (node, path) => {
+ if (node.error?.length) {
+ hasError = true;
+ let currPathErrorStats = this.queryLatencyStats.rootErrorStats;
+ path.toArray().forEach((subPath) => {
+ currPathErrorStats = currPathErrorStats.getChild(subPath, sizeEstimator);
+ });
+ errorPathStats.add(currPathErrorStats);
+ currPathErrorStats.errorsCount += node.error.length;
+ }
+ if (fieldExecutionWeight) {
+ const fieldName = node.originalFieldName || node.responseName;
+ if (node.parentType && fieldName && node.type && node.endTime != null && node.startTime != null && node.endTime >= node.startTime) {
+ const fieldStat = this.getTypeStat(node.parentType, sizeEstimator).getFieldStat(fieldName, node.type, sizeEstimator);
+ fieldStat.errorsCount += node.error?.length ?? 0;
+ fieldStat.observedExecutionCount++;
+ fieldStat.estimatedExecutionCount += fieldExecutionWeight;
+ fieldStat.requestsWithErrorsCount += (node.error?.length ?? 0) > 0 ? 1 : 0;
+ fieldStat.latencyCount.incrementDuration(node.endTime - node.startTime, fieldExecutionWeight);
+ }
+ }
+ return false;
+ };
+ iterateOverTrace(trace, traceNodeStats, true);
+ for (const { subgraph, path } of nonFtv1ErrorPaths) {
+ hasError = true;
+ if (path) {
+ let currPathErrorStats = this.queryLatencyStats.rootErrorStats.getChild(`service:${subgraph}`, sizeEstimator);
+ path.forEach((subPath) => {
+ if (typeof subPath === "string") currPathErrorStats = currPathErrorStats.getChild(subPath, sizeEstimator);
+ });
+ errorPathStats.add(currPathErrorStats);
+ currPathErrorStats.errorsCount += 1;
+ }
+ }
+ for (const errorPath of errorPathStats) errorPath.requestsWithErrorsCount += 1;
+ if (hasError) this.queryLatencyStats.requestsWithErrorsCount++;
+ }
+ getTypeStat(parentType, sizeEstimator) {
+ const existing = this.perTypeStat[parentType];
+ if (existing) return existing;
+ sizeEstimator.bytes += estimatedBytesForString(parentType);
+ const typeStat = new OurTypeStat();
+ this.perTypeStat[parentType] = typeStat;
+ return typeStat;
+ }
+ };
+ var OurQueryLatencyStats = class {
+ latencyCount = new DurationHistogram();
+ requestCount = 0;
+ requestsWithoutFieldInstrumentation = 0;
+ cacheHits = 0;
+ persistedQueryHits = 0;
+ persistedQueryMisses = 0;
+ cacheLatencyCount = new DurationHistogram();
+ rootErrorStats = new OurPathErrorStats();
+ requestsWithErrorsCount = 0;
+ publicCacheTtlCount = new DurationHistogram();
+ privateCacheTtlCount = new DurationHistogram();
+ registeredOperationCount = 0;
+ forbiddenOperationCount = 0;
+ };
+ var OurPathErrorStats = class OurPathErrorStats {
+ children = Object.create(null);
+ errorsCount = 0;
+ requestsWithErrorsCount = 0;
+ getChild(subPath, sizeEstimator) {
+ const existing = this.children[subPath];
+ if (existing) return existing;
+ const child = new OurPathErrorStats();
+ this.children[subPath] = child;
+ sizeEstimator.bytes += estimatedBytesForString(subPath) + 4;
+ return child;
+ }
+ };
+ var OurTypeStat = class {
+ perFieldStat = Object.create(null);
+ getFieldStat(fieldName, returnType, sizeEstimator) {
+ const existing = this.perFieldStat[fieldName];
+ if (existing) return existing;
+ sizeEstimator.bytes += estimatedBytesForString(fieldName) + estimatedBytesForString(returnType) + 10;
+ const fieldStat = new OurFieldStat(returnType);
+ this.perFieldStat[fieldName] = fieldStat;
+ return fieldStat;
+ }
+ ensureCountsAreIntegers() {
+ for (const fieldStat of Object.values(this.perFieldStat)) fieldStat.ensureCountsAreIntegers();
+ }
+ };
+ var OurFieldStat = class {
+ errorsCount = 0;
+ observedExecutionCount = 0;
+ estimatedExecutionCount = 0;
+ requestsWithErrorsCount = 0;
+ latencyCount = new DurationHistogram();
+ constructor(returnType) {
+ this.returnType = returnType;
+ }
+ ensureCountsAreIntegers() {
+ this.estimatedExecutionCount = Math.floor(this.estimatedExecutionCount);
+ }
+ };
+ function estimatedBytesForString(s) {
+ return 2 + Buffer.byteLength(s);
+ }
+ var DurationHistogram = class DurationHistogram {
+ buckets;
+ static BUCKET_COUNT = 384;
+ static EXPONENT_LOG = Math.log(1.1);
+ toArray() {
+ let bufferedZeroes = 0;
+ const outputArray = [];
+ for (const value of this.buckets) if (value === 0) bufferedZeroes++;
+ else {
+ if (bufferedZeroes === 1) outputArray.push(0);
+ else if (bufferedZeroes !== 0) outputArray.push(-bufferedZeroes);
+ outputArray.push(Math.floor(value));
+ bufferedZeroes = 0;
+ }
+ return outputArray;
+ }
+ static durationToBucket(durationNs) {
+ const log = Math.log(durationNs / 1e3);
+ const unboundedBucket = Math.ceil(log / DurationHistogram.EXPONENT_LOG);
+ return unboundedBucket <= 0 || Number.isNaN(unboundedBucket) ? 0 : unboundedBucket >= DurationHistogram.BUCKET_COUNT ? DurationHistogram.BUCKET_COUNT - 1 : unboundedBucket;
+ }
+ incrementDuration(durationNs, value = 1) {
+ this.incrementBucket(DurationHistogram.durationToBucket(durationNs), value);
+ return this;
+ }
+ incrementBucket(bucket, value = 1) {
+ if (bucket >= DurationHistogram.BUCKET_COUNT) throw Error("Bucket is out of bounds of the buckets array");
+ if (bucket >= this.buckets.length) {
+ const oldLength = this.buckets.length;
+ this.buckets.length = bucket + 1;
+ this.buckets.fill(0, oldLength);
+ }
+ this.buckets[bucket] += value;
+ }
+ combine(otherHistogram) {
+ for (let i = 0; i < otherHistogram.buckets.length; i++) this.incrementBucket(i, otherHistogram.buckets[i]);
+ }
+ constructor(options) {
+ const initSize = options?.initSize || 74;
+ const buckets = options?.buckets;
+ const arrayInitSize = Math.max(buckets?.length || 0, initSize);
+ this.buckets = Array(arrayInitSize).fill(0);
+ if (buckets) buckets.forEach((val, index) => this.buckets[index] = val);
+ }
+ };
+ /**
+ * Iterates over the entire trace, calling `f` on each Trace.Node found. It
+ * looks under the "root" node as well as any inside the query plan. If any `f`
+ * returns true, it stops walking the tree.
+ *
+ * Each call to `f` will receive an object that implements ResponseNamePath. If
+ * `includePath` is true, `f` can call `toArray()` on it to convert the
+ * linked-list representation to an array of the response name (field name)
+ * nodes that you navigate to get to the node (including a "service:subgraph"
+ * top-level node if this is a federated trace). Note that we don't add anything
+ * to the path for index (list element) nodes. This is because the only use case
+ * we have (error path statistics) does not care about list indexes (it's not
+ * that interesting to know that sometimes an error was at foo.3.bar and
+ * sometimes foo.5.bar, vs just generally foo.bar).
+ *
+ * If `includePath` is false, we don't bother to build up the linked lists, and
+ * calling `toArray()` will throw.
+ */
+ function iterateOverTrace(trace, f, includePath) {
+ const rootPath = includePath ? new RootCollectingPathsResponseNamePath() : notCollectingPathsResponseNamePath;
+ if (trace.root) {
+ if (iterateOverTraceNode(trace.root, rootPath, f)) return;
+ }
+ if (trace.queryPlan) {
+ if (iterateOverQueryPlan(trace.queryPlan, rootPath, f)) return;
+ }
+ }
+ function iterateOverQueryPlan(node, rootPath, f) {
+ if (!node) return false;
+ if (node.fetch?.trace?.root && node.fetch.serviceName) return iterateOverTraceNode(node.fetch.trace.root, rootPath.child(`service:${node.fetch.serviceName}`), f);
+ if (node.flatten?.node) return iterateOverQueryPlan(node.flatten.node, rootPath, f);
+ if (node.parallel?.nodes) return node.parallel.nodes.some((node$1) => iterateOverQueryPlan(node$1, rootPath, f));
+ if (node.sequence?.nodes) return node.sequence.nodes.some((node$1) => iterateOverQueryPlan(node$1, rootPath, f));
+ return false;
+ }
+ function iterateOverTraceNode(node, path, f) {
+ if (f(node, path)) return true;
+ return node.child?.some((child) => {
+ return iterateOverTraceNode(child, child.responseName ? path.child(child.responseName) : path, f);
+ }) ?? false;
+ }
+ const notCollectingPathsResponseNamePath = {
+ toArray() {
+ throw Error("not collecting paths!");
+ },
+ child() {
+ return this;
+ }
+ };
+ var RootCollectingPathsResponseNamePath = class {
+ toArray() {
+ return [];
+ }
+ child(responseName) {
+ return new ChildCollectingPathsResponseNamePath(responseName, this);
+ }
+ };
+ var ChildCollectingPathsResponseNamePath = class ChildCollectingPathsResponseNamePath {
+ constructor(responseName, prev) {
+ this.responseName = responseName;
+ this.prev = prev;
+ }
+ toArray() {
+ const out = [];
+ let curr = this;
+ while (curr instanceof ChildCollectingPathsResponseNamePath) {
+ out.push(curr.responseName);
+ curr = curr.prev;
+ }
+ return out.reverse();
+ }
+ child(responseName) {
+ return new ChildCollectingPathsResponseNamePath(responseName, this);
+ }
+ };
+
+ //#endregion
+ exports.OurReport = OurReport;
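
Illustrative sketch (not part of the published diff): DurationHistogram.toArray() above run-length-encodes empty buckets — a single empty bucket is emitted as 0, a run of n > 1 empty buckets is emitted as -n, and trailing zeros are dropped. A minimal standalone TypeScript re-derivation with hypothetical bucket values:

// Re-derivation of DurationHistogram.toArray()'s zero-run encoding, for illustration only.
function encodeBuckets(buckets: number[]): number[] {
  let bufferedZeroes = 0;
  const out: number[] = [];
  for (const value of buckets) {
    if (value === 0) {
      bufferedZeroes++;
    } else {
      if (bufferedZeroes === 1) out.push(0);                   // single empty bucket stays 0
      else if (bufferedZeroes !== 0) out.push(-bufferedZeroes); // run of n empty buckets becomes -n
      out.push(Math.floor(value));
      bufferedZeroes = 0;
    }
  }
  return out; // trailing empty buckets are never flushed, so they are dropped
}

// [3, 0, 0, 0, 2, 0, 1, 0, 0] -> [3, -3, 2, 0, 1]
console.log(encodeBuckets([3, 0, 0, 0, 2, 0, 1, 0, 0]));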
@@ -0,0 +1,123 @@
+ import { ReferencedFieldsByType } from "@apollo/utils.usagereporting";
+ import { IContextualizedStats, IFieldStat, IPathErrorStats, IQueryLatencyStats, IReport, IStatsContext, ITracesAndStats, ITypeStat, ReportHeader, Trace, google } from "@apollo/usage-reporting-protobuf";
+ import { NonFtv1ErrorPath } from "@apollo/server-gateway-interface";
+
+ //#region src/stats.d.ts
+ declare class SizeEstimator {
+ bytes: number;
+ }
+ declare class OurReport implements Required<IReport> {
+ readonly header: ReportHeader;
+ tracesPreAggregated: boolean;
+ constructor(header: ReportHeader);
+ readonly tracesPerQuery: Record<string, OurTracesAndStats>;
+ endTime: google.protobuf.ITimestamp | null;
+ operationCount: number;
+ readonly sizeEstimator: SizeEstimator;
+ ensureCountsAreIntegers(): void;
+ addTrace({
+ statsReportKey,
+ trace,
+ asTrace,
+ referencedFieldsByType,
+ maxTraceBytes,
+ nonFtv1ErrorPaths
+ }: {
+ statsReportKey: string;
+ trace: Trace;
+ asTrace: boolean;
+ referencedFieldsByType: ReferencedFieldsByType;
+ maxTraceBytes?: number;
+ nonFtv1ErrorPaths: NonFtv1ErrorPath[];
+ }): void;
+ private getTracesAndStats;
+ }
+ declare class OurTracesAndStats implements Required<ITracesAndStats> {
+ readonly referencedFieldsByType: ReferencedFieldsByType;
+ constructor(referencedFieldsByType: ReferencedFieldsByType);
+ readonly trace: Uint8Array[];
+ readonly statsWithContext: StatsByContext;
+ readonly internalTracesContributingToStats: Uint8Array[];
+ ensureCountsAreIntegers(): void;
+ }
+ declare class StatsByContext {
+ readonly map: {
+ [k: string]: OurContextualizedStats;
+ };
+ /**
+ * This function is used by the protobuf generator to convert this map into
+ * an array of contextualized stats to serialize
+ */
+ toArray(): IContextualizedStats[];
+ ensureCountsAreIntegers(): void;
+ addTrace(trace: Trace, sizeEstimator: SizeEstimator, nonFtv1ErrorPaths: NonFtv1ErrorPath[]): void;
+ private getContextualizedStats;
+ }
+ declare class OurContextualizedStats implements Required<IContextualizedStats> {
+ readonly context: IStatsContext;
+ queryLatencyStats: OurQueryLatencyStats;
+ perTypeStat: {
+ [k: string]: OurTypeStat;
+ };
+ constructor(context: IStatsContext);
+ ensureCountsAreIntegers(): void;
+ addTrace(trace: Trace, sizeEstimator: SizeEstimator, nonFtv1ErrorPaths?: NonFtv1ErrorPath[]): void;
+ getTypeStat(parentType: string, sizeEstimator: SizeEstimator): OurTypeStat;
+ }
+ declare class OurQueryLatencyStats implements Required<IQueryLatencyStats> {
+ latencyCount: DurationHistogram;
+ requestCount: number;
+ requestsWithoutFieldInstrumentation: number;
+ cacheHits: number;
+ persistedQueryHits: number;
+ persistedQueryMisses: number;
+ cacheLatencyCount: DurationHistogram;
+ rootErrorStats: OurPathErrorStats;
+ requestsWithErrorsCount: number;
+ publicCacheTtlCount: DurationHistogram;
+ privateCacheTtlCount: DurationHistogram;
+ registeredOperationCount: number;
+ forbiddenOperationCount: number;
+ }
+ declare class OurPathErrorStats implements Required<IPathErrorStats> {
+ children: {
+ [k: string]: OurPathErrorStats;
+ };
+ errorsCount: number;
+ requestsWithErrorsCount: number;
+ getChild(subPath: string, sizeEstimator: SizeEstimator): OurPathErrorStats;
+ }
+ declare class OurTypeStat implements Required<ITypeStat> {
+ perFieldStat: {
+ [k: string]: OurFieldStat;
+ };
+ getFieldStat(fieldName: string, returnType: string, sizeEstimator: SizeEstimator): OurFieldStat;
+ ensureCountsAreIntegers(): void;
+ }
+ declare class OurFieldStat implements Required<IFieldStat> {
+ readonly returnType: string;
+ errorsCount: number;
+ observedExecutionCount: number;
+ estimatedExecutionCount: number;
+ requestsWithErrorsCount: number;
+ latencyCount: DurationHistogram;
+ constructor(returnType: string);
+ ensureCountsAreIntegers(): void;
+ }
+ interface DurationHistogramOptions {
+ initSize?: number;
+ buckets?: number[];
+ }
+ declare class DurationHistogram {
+ private readonly buckets;
+ static readonly BUCKET_COUNT = 384;
+ static readonly EXPONENT_LOG: number;
+ toArray(): number[];
+ static durationToBucket(durationNs: number): number;
+ incrementDuration(durationNs: number, value?: number): DurationHistogram;
+ incrementBucket(bucket: number, value?: number): void;
+ combine(otherHistogram: DurationHistogram): void;
+ constructor(options?: DurationHistogramOptions);
+ }
+ //#endregion
+ export { OurReport };
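
Illustrative sketch (not part of the published diff): the uncompressed-size budget that triggers an early flush in reporter.mjs (maxBatchUncompressedSize, 4 MiB by default) is accumulated with the 2 + Buffer.byteLength(s) estimate from estimatedBytesForString in stats.cjs. A hypothetical back-of-the-envelope check in TypeScript, using an invented stats report key:

// Re-derivation of estimatedBytesForString from stats.cjs, for illustration only.
function estimatedBytesForString(s: string): number {
  return 2 + Buffer.byteLength(s); // 2 bytes of framing overhead plus the UTF-8 payload
}

// Hypothetical stats report key for an operation named GetUser.
const statsReportKey = "# GetUser\n{user{id name}}";
const estimate = estimatedBytesForString(statsReportKey);

const DEFAULT_MAX_BATCH_UNCOMPRESSED_SIZE = 4 * 1024 * 1024; // default from reporter.mjs above

// e.g. 27 false — a single small key is nowhere near the flush threshold
console.log(estimate, estimate >= DEFAULT_MAX_BATCH_UNCOMPRESSED_SIZE);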