@littlebearapps/platform-consumer-sdk 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +306 -0
- package/package.json +53 -0
- package/src/ai-gateway.ts +305 -0
- package/src/constants.ts +147 -0
- package/src/costs.ts +590 -0
- package/src/do-heartbeat.ts +249 -0
- package/src/dynamic-patterns.ts +273 -0
- package/src/errors.ts +285 -0
- package/src/features.ts +149 -0
- package/src/heartbeat.ts +27 -0
- package/src/index.ts +950 -0
- package/src/logging.ts +543 -0
- package/src/middleware.ts +447 -0
- package/src/patterns.ts +156 -0
- package/src/proxy.ts +732 -0
- package/src/retry.ts +19 -0
- package/src/service-client.ts +291 -0
- package/src/telemetry.ts +342 -0
- package/src/timeout.ts +212 -0
- package/src/tracing.ts +403 -0
- package/src/types.ts +465 -0
package/src/retry.ts
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
export async function withExponentialBackoff<T>(
|
|
2
|
+
operation: () => Promise<T>,
|
|
3
|
+
attempts = 3
|
|
4
|
+
): Promise<T> {
|
|
5
|
+
let lastError: unknown;
|
|
6
|
+
for (let attempt = 0; attempt < attempts; attempt += 1) {
|
|
7
|
+
try {
|
|
8
|
+
return await operation();
|
|
9
|
+
} catch (error) {
|
|
10
|
+
lastError = error;
|
|
11
|
+
if (attempt === attempts - 1) {
|
|
12
|
+
break;
|
|
13
|
+
}
|
|
14
|
+
const delay = Math.min(1000, 2 ** attempt * 100);
|
|
15
|
+
await new Promise((resolve) => setTimeout(resolve, delay));
|
|
16
|
+
}
|
|
17
|
+
}
|
|
18
|
+
throw lastError ?? new Error('Operation failed');
|
|
19
|
+
}
|
|
@@ -0,0 +1,291 @@
|
|
|
1
|
+
/// <reference types="@cloudflare/workers-types" />
|
|
2
|
+
|
|
3
|
+
/**
|
|
4
|
+
* Platform SDK Service Client
|
|
5
|
+
*
|
|
6
|
+
* Provides helpers for making inter-service requests with automatic
|
|
7
|
+
* propagation of correlation IDs and trace context.
|
|
8
|
+
*
|
|
9
|
+
* @example
|
|
10
|
+
* ```typescript
|
|
11
|
+
* import { createServiceClient } from './lib/platform-sdk';
|
|
12
|
+
*
|
|
13
|
+
* // Create a client for calling another service
|
|
14
|
+
* const client = createServiceClient(env, 'platform-alert-router');
|
|
15
|
+
*
|
|
16
|
+
* // Make a traced request
|
|
17
|
+
* const response = await client.fetch('https://alert-router/notify', {
|
|
18
|
+
* method: 'POST',
|
|
19
|
+
* body: JSON.stringify({ message: 'Hello' })
|
|
20
|
+
* });
|
|
21
|
+
* ```
|
|
22
|
+
*/
|
|
23
|
+
|
|
24
|
+
import { getCorrelationId } from './logging';
|
|
25
|
+
import { getTraceContext, propagateTraceContext } from './tracing';
|
|
26
|
+
|
|
27
|
+
// =============================================================================
|
|
28
|
+
// TYPES
|
|
29
|
+
// =============================================================================
|
|
30
|
+
|
|
31
|
+
/**
 * Service client for making inter-service requests.
 *
 * Obtained via createServiceClient(); its fetch() attaches correlation,
 * source/target-service, and trace headers before delegating to global fetch.
 */
export interface ServiceClient {
  /** Make a fetch request with propagated context */
  fetch(input: RequestInfo | URL, init?: RequestInit): Promise<Response>;
  /** Get the current correlation ID (fixed at client creation time) */
  readonly correlationId: string;
  /** Get the current trace ID (undefined when no trace context is active) */
  readonly traceId: string | undefined;
}
|
|
42
|
+
|
|
43
|
+
/**
 * Options for creating a service client.
 */
export interface ServiceClientOptions {
  /** Target service name; sent as the x-target-service header when set */
  targetService: string;
  /** Additional default headers; applied only when the per-request init did not already set them */
  defaultHeaders?: Record<string, string>;
  /** Request timeout in milliseconds; aborts the request via AbortController when exceeded */
  timeoutMs?: number;
}
|
|
54
|
+
|
|
55
|
+
// =============================================================================
|
|
56
|
+
// HEADER CONSTANTS
|
|
57
|
+
// =============================================================================
|
|
58
|
+
|
|
59
|
+
/** Correlation ID header name (also read back by extractCorrelationChain) */
export const CORRELATION_ID_HEADER = 'x-correlation-id';

/** Source service header name — identifies the calling worker */
export const SOURCE_SERVICE_HEADER = 'x-source-service';

/** Target service header name — identifies the intended recipient service */
export const TARGET_SERVICE_HEADER = 'x-target-service';

/** Feature ID header name ("project:category:feature" form) */
export const FEATURE_ID_HEADER = 'x-feature-id';
|
|
70
|
+
|
|
71
|
+
// =============================================================================
|
|
72
|
+
// SERVICE CLIENT
|
|
73
|
+
// =============================================================================
|
|
74
|
+
|
|
75
|
+
/**
|
|
76
|
+
* Create a service client for making inter-service requests.
|
|
77
|
+
* Automatically propagates correlation ID and trace context.
|
|
78
|
+
*
|
|
79
|
+
* @param env - Environment object (tracked or raw)
|
|
80
|
+
* @param sourceService - Name of the calling service
|
|
81
|
+
* @param options - Optional configuration
|
|
82
|
+
* @returns Service client with fetch method
|
|
83
|
+
*/
|
|
84
|
+
export function createServiceClient(
|
|
85
|
+
env: object,
|
|
86
|
+
sourceService: string,
|
|
87
|
+
options: Partial<ServiceClientOptions> = {}
|
|
88
|
+
): ServiceClient {
|
|
89
|
+
const correlationId = getCorrelationId(env);
|
|
90
|
+
const traceContext = getTraceContext(env);
|
|
91
|
+
|
|
92
|
+
const { defaultHeaders = {}, timeoutMs } = options;
|
|
93
|
+
|
|
94
|
+
return {
|
|
95
|
+
get correlationId() {
|
|
96
|
+
return correlationId;
|
|
97
|
+
},
|
|
98
|
+
|
|
99
|
+
get traceId() {
|
|
100
|
+
return traceContext?.traceId;
|
|
101
|
+
},
|
|
102
|
+
|
|
103
|
+
async fetch(input: RequestInfo | URL, init?: RequestInit): Promise<Response> {
|
|
104
|
+
// Build headers with propagated context
|
|
105
|
+
const headers = new Headers(init?.headers);
|
|
106
|
+
|
|
107
|
+
// Add correlation ID
|
|
108
|
+
headers.set(CORRELATION_ID_HEADER, correlationId);
|
|
109
|
+
|
|
110
|
+
// Add source service
|
|
111
|
+
headers.set(SOURCE_SERVICE_HEADER, sourceService);
|
|
112
|
+
|
|
113
|
+
// Add target service if known
|
|
114
|
+
if (options.targetService) {
|
|
115
|
+
headers.set(TARGET_SERVICE_HEADER, options.targetService);
|
|
116
|
+
}
|
|
117
|
+
|
|
118
|
+
// Add trace context headers
|
|
119
|
+
if (traceContext) {
|
|
120
|
+
const traceHeaders = propagateTraceContext(traceContext);
|
|
121
|
+
traceHeaders.forEach((value, key) => {
|
|
122
|
+
headers.set(key, value);
|
|
123
|
+
});
|
|
124
|
+
}
|
|
125
|
+
|
|
126
|
+
// Add default headers
|
|
127
|
+
for (const [key, value] of Object.entries(defaultHeaders)) {
|
|
128
|
+
if (!headers.has(key)) {
|
|
129
|
+
headers.set(key, value);
|
|
130
|
+
}
|
|
131
|
+
}
|
|
132
|
+
|
|
133
|
+
// Build request options
|
|
134
|
+
const requestInit: RequestInit = {
|
|
135
|
+
...init,
|
|
136
|
+
headers,
|
|
137
|
+
};
|
|
138
|
+
|
|
139
|
+
// Apply timeout if specified
|
|
140
|
+
if (timeoutMs) {
|
|
141
|
+
const controller = new AbortController();
|
|
142
|
+
const timeoutId = setTimeout(() => controller.abort(), timeoutMs);
|
|
143
|
+
|
|
144
|
+
try {
|
|
145
|
+
return await fetch(input, {
|
|
146
|
+
...requestInit,
|
|
147
|
+
signal: controller.signal,
|
|
148
|
+
});
|
|
149
|
+
} finally {
|
|
150
|
+
clearTimeout(timeoutId);
|
|
151
|
+
}
|
|
152
|
+
}
|
|
153
|
+
|
|
154
|
+
return fetch(input, requestInit);
|
|
155
|
+
},
|
|
156
|
+
};
|
|
157
|
+
}
|
|
158
|
+
|
|
159
|
+
// =============================================================================
|
|
160
|
+
// SERVICE BINDING HELPERS
|
|
161
|
+
// =============================================================================
|
|
162
|
+
|
|
163
|
+
/**
|
|
164
|
+
* Create headers for calling a service binding (Fetcher).
|
|
165
|
+
* Use this when calling env.SERVICE_BINDING.fetch() directly.
|
|
166
|
+
*
|
|
167
|
+
* @param env - Environment object
|
|
168
|
+
* @param sourceService - Name of the calling service
|
|
169
|
+
* @returns Headers object with propagated context
|
|
170
|
+
*/
|
|
171
|
+
export function createServiceBindingHeaders(env: object, sourceService: string): Headers {
|
|
172
|
+
const headers = new Headers();
|
|
173
|
+
|
|
174
|
+
// Add correlation ID
|
|
175
|
+
headers.set(CORRELATION_ID_HEADER, getCorrelationId(env));
|
|
176
|
+
|
|
177
|
+
// Add source service
|
|
178
|
+
headers.set(SOURCE_SERVICE_HEADER, sourceService);
|
|
179
|
+
|
|
180
|
+
// Add trace context
|
|
181
|
+
const traceContext = getTraceContext(env);
|
|
182
|
+
if (traceContext) {
|
|
183
|
+
const traceHeaders = propagateTraceContext(traceContext);
|
|
184
|
+
traceHeaders.forEach((value, key) => {
|
|
185
|
+
headers.set(key, value);
|
|
186
|
+
});
|
|
187
|
+
}
|
|
188
|
+
|
|
189
|
+
return headers;
|
|
190
|
+
}
|
|
191
|
+
|
|
192
|
+
/**
|
|
193
|
+
* Wrap a service binding (Fetcher) with automatic context propagation.
|
|
194
|
+
*
|
|
195
|
+
* @param fetcher - Service binding (Fetcher)
|
|
196
|
+
* @param env - Environment object
|
|
197
|
+
* @param sourceService - Name of the calling service
|
|
198
|
+
* @returns Wrapped fetcher that propagates context
|
|
199
|
+
*
|
|
200
|
+
* @example
|
|
201
|
+
* ```typescript
|
|
202
|
+
* const tracedAlertRouter = wrapServiceBinding(
|
|
203
|
+
* env.ALERT_ROUTER,
|
|
204
|
+
* env,
|
|
205
|
+
* 'platform-usage'
|
|
206
|
+
* );
|
|
207
|
+
*
|
|
208
|
+
* await tracedAlertRouter.fetch('https://alert-router/notify', {
|
|
209
|
+
* method: 'POST',
|
|
210
|
+
* body: JSON.stringify({ message: 'Alert!' })
|
|
211
|
+
* });
|
|
212
|
+
* ```
|
|
213
|
+
*/
|
|
214
|
+
export function wrapServiceBinding(fetcher: Fetcher, env: object, sourceService: string): Fetcher {
|
|
215
|
+
return {
|
|
216
|
+
fetch: async (input: RequestInfo | URL, init?: RequestInit) => {
|
|
217
|
+
const headers = new Headers(init?.headers);
|
|
218
|
+
|
|
219
|
+
// Merge in service binding headers
|
|
220
|
+
const contextHeaders = createServiceBindingHeaders(env, sourceService);
|
|
221
|
+
contextHeaders.forEach((value, key) => {
|
|
222
|
+
headers.set(key, value);
|
|
223
|
+
});
|
|
224
|
+
|
|
225
|
+
return fetcher.fetch(input, {
|
|
226
|
+
...init,
|
|
227
|
+
headers,
|
|
228
|
+
});
|
|
229
|
+
},
|
|
230
|
+
connect: fetcher.connect,
|
|
231
|
+
};
|
|
232
|
+
}
|
|
233
|
+
|
|
234
|
+
// =============================================================================
|
|
235
|
+
// CORRELATION CHAIN EXTRACTION
|
|
236
|
+
// =============================================================================
|
|
237
|
+
|
|
238
|
+
/**
 * Extract correlation chain from request headers.
 * Returns information about the calling service and trace.
 */
export interface CorrelationChain {
  /** Correlation ID (freshly generated when the incoming request carried none) */
  correlationId: string;
  /** Calling service, from the x-source-service header */
  sourceService?: string;
  /** Intended recipient, from the x-target-service header */
  targetService?: string;
  /** Feature identifier, from the x-feature-id header */
  featureId?: string;
  /** Trace ID parsed from the W3C traceparent header */
  traceId?: string;
  /** Parent span ID parsed from the W3C traceparent header */
  spanId?: string;
}
|
|
250
|
+
|
|
251
|
+
/**
|
|
252
|
+
* Extract correlation chain from incoming request.
|
|
253
|
+
*
|
|
254
|
+
* @param request - Incoming request
|
|
255
|
+
* @returns Correlation chain information
|
|
256
|
+
*/
|
|
257
|
+
export function extractCorrelationChain(request: Request): CorrelationChain {
|
|
258
|
+
return {
|
|
259
|
+
correlationId:
|
|
260
|
+
request.headers.get(CORRELATION_ID_HEADER) ||
|
|
261
|
+
request.headers.get('x-request-id') ||
|
|
262
|
+
crypto.randomUUID(),
|
|
263
|
+
sourceService: request.headers.get(SOURCE_SERVICE_HEADER) || undefined,
|
|
264
|
+
targetService: request.headers.get(TARGET_SERVICE_HEADER) || undefined,
|
|
265
|
+
featureId: request.headers.get(FEATURE_ID_HEADER) || undefined,
|
|
266
|
+
traceId: extractTraceIdFromRequest(request),
|
|
267
|
+
spanId: extractSpanIdFromRequest(request),
|
|
268
|
+
};
|
|
269
|
+
}
|
|
270
|
+
|
|
271
|
+
/**
|
|
272
|
+
* Extract trace ID from traceparent header.
|
|
273
|
+
*/
|
|
274
|
+
function extractTraceIdFromRequest(request: Request): string | undefined {
|
|
275
|
+
const traceparent = request.headers.get('traceparent');
|
|
276
|
+
if (!traceparent) return undefined;
|
|
277
|
+
|
|
278
|
+
const parts = traceparent.split('-');
|
|
279
|
+
return parts.length >= 2 ? parts[1] : undefined;
|
|
280
|
+
}
|
|
281
|
+
|
|
282
|
+
/**
|
|
283
|
+
* Extract span ID from traceparent header.
|
|
284
|
+
*/
|
|
285
|
+
function extractSpanIdFromRequest(request: Request): string | undefined {
|
|
286
|
+
const traceparent = request.headers.get('traceparent');
|
|
287
|
+
if (!traceparent) return undefined;
|
|
288
|
+
|
|
289
|
+
const parts = traceparent.split('-');
|
|
290
|
+
return parts.length >= 3 ? parts[2] : undefined;
|
|
291
|
+
}
|
package/src/telemetry.ts
ADDED
|
@@ -0,0 +1,342 @@
|
|
|
1
|
+
/// <reference types="@cloudflare/workers-types" />
|
|
2
|
+
|
|
3
|
+
/**
|
|
4
|
+
* Platform SDK Telemetry
|
|
5
|
+
*
|
|
6
|
+
* Queue-based telemetry for reporting feature usage metrics.
|
|
7
|
+
* Uses waitUntil for non-blocking telemetry submission.
|
|
8
|
+
*/
|
|
9
|
+
|
|
10
|
+
import type { FeatureId, FeatureMetrics, MetricsAccumulator, TelemetryMessage } from './types';
|
|
11
|
+
import { createLogger, type Logger } from './logging';
|
|
12
|
+
|
|
13
|
+
// =============================================================================
|
|
14
|
+
// MODULE LOGGER (lazy-initialized to avoid global scope crypto calls)
|
|
15
|
+
// =============================================================================
|
|
16
|
+
|
|
17
|
+
let _log: Logger | null = null;
|
|
18
|
+
function getLog(): Logger {
|
|
19
|
+
if (!_log) {
|
|
20
|
+
_log = createLogger({
|
|
21
|
+
worker: 'platform-sdk',
|
|
22
|
+
featureId: 'platform:sdk:telemetry',
|
|
23
|
+
});
|
|
24
|
+
}
|
|
25
|
+
return _log;
|
|
26
|
+
}
|
|
27
|
+
|
|
28
|
+
// =============================================================================
|
|
29
|
+
// TYPES
|
|
30
|
+
// =============================================================================
|
|
31
|
+
|
|
32
|
+
/**
 * Telemetry context for a single request.
 *
 * Created when a request starts and consumed by flushMetrics() when the
 * request completes.
 */
export interface TelemetryContext {
  /** Feature identifier in "project:category:feature" form */
  featureId: FeatureId;
  /** Running counters gathered while the request executes */
  metrics: MetricsAccumulator;
  /** Request start (epoch ms); used to compute request_duration_ms */
  startTime: number;
  /** Telemetry queue binding; when absent, flushMetrics logs a warning instead of sending */
  queue?: Queue<TelemetryMessage>;
  /** ExecutionContext whose waitUntil keeps the telemetry send alive past the response */
  ctx?: ExecutionContext;
  /** Correlation ID propagated into the telemetry message */
  correlationId?: string;
  /** W3C Trace ID for distributed tracing */
  traceId?: string;
  /** W3C Span ID for distributed tracing */
  spanId?: string;
  /** External API cost in USD (e.g., OpenAI, Apify) */
  externalCostUsd?: number;
}
|
|
49
|
+
|
|
50
|
+
// =============================================================================
|
|
51
|
+
// CONTEXT MANAGEMENT
|
|
52
|
+
// =============================================================================
|
|
53
|
+
|
|
54
|
+
/**
 * WeakMap to store telemetry context per proxied environment.
 * Using WeakMap allows garbage collection when env is no longer referenced.
 */
const telemetryContexts = new WeakMap<object, TelemetryContext>();

/**
 * Store telemetry context for a proxied environment.
 * Overwrites any context previously registered for the same env object.
 */
export function setTelemetryContext(env: object, context: TelemetryContext): void {
  telemetryContexts.set(env, context);
}

/**
 * Get telemetry context for a proxied environment.
 * Returns undefined when no context was set (or it was already cleared).
 */
export function getTelemetryContext(env: object): TelemetryContext | undefined {
  return telemetryContexts.get(env);
}

/**
 * Remove telemetry context for a proxied environment.
 * Safe to call when no context is registered.
 */
export function clearTelemetryContext(env: object): void {
  telemetryContexts.delete(env);
}
|
|
80
|
+
|
|
81
|
+
// =============================================================================
|
|
82
|
+
// TELEMETRY REPORTING
|
|
83
|
+
// =============================================================================
|
|
84
|
+
|
|
85
|
+
/**
|
|
86
|
+
* Parse a feature ID into its component parts.
|
|
87
|
+
*/
|
|
88
|
+
function parseFeatureId(featureId: FeatureId): {
|
|
89
|
+
project: string;
|
|
90
|
+
category: string;
|
|
91
|
+
feature: string;
|
|
92
|
+
} {
|
|
93
|
+
const parts = featureId.split(':');
|
|
94
|
+
if (parts.length !== 3) {
|
|
95
|
+
throw new Error(
|
|
96
|
+
`Invalid featureId format: "${featureId}". Expected "project:category:feature"`
|
|
97
|
+
);
|
|
98
|
+
}
|
|
99
|
+
return {
|
|
100
|
+
project: parts[0],
|
|
101
|
+
category: parts[1],
|
|
102
|
+
feature: parts[2],
|
|
103
|
+
};
|
|
104
|
+
}
|
|
105
|
+
|
|
106
|
+
/**
 * Convert MetricsAccumulator to FeatureMetrics, excluding zero values.
 *
 * Only counters that are strictly positive make it into the output, which
 * keeps the serialized telemetry payload small and lets hasDataToReport()
 * detect an "empty" message cheaply.
 */
function accumulatorToMetrics(accumulator: MetricsAccumulator): FeatureMetrics {
  const metrics: FeatureMetrics = {};

  // Only include non-zero values
  if (accumulator.d1Writes > 0) metrics.d1Writes = accumulator.d1Writes;
  if (accumulator.d1Reads > 0) metrics.d1Reads = accumulator.d1Reads;
  if (accumulator.d1RowsRead > 0) metrics.d1RowsRead = accumulator.d1RowsRead;
  if (accumulator.d1RowsWritten > 0) metrics.d1RowsWritten = accumulator.d1RowsWritten;
  if (accumulator.kvReads > 0) metrics.kvReads = accumulator.kvReads;
  if (accumulator.kvWrites > 0) metrics.kvWrites = accumulator.kvWrites;
  if (accumulator.kvDeletes > 0) metrics.kvDeletes = accumulator.kvDeletes;
  if (accumulator.kvLists > 0) metrics.kvLists = accumulator.kvLists;
  if (accumulator.aiRequests > 0) metrics.aiRequests = accumulator.aiRequests;
  if (accumulator.aiNeurons > 0) metrics.aiNeurons = accumulator.aiNeurons;
  // Convert Map to object for JSON serialization
  if (accumulator.aiModelCounts.size > 0) {
    metrics.aiModelBreakdown = Object.fromEntries(accumulator.aiModelCounts);
  }
  if (accumulator.vectorizeQueries > 0) metrics.vectorizeQueries = accumulator.vectorizeQueries;
  if (accumulator.vectorizeInserts > 0) metrics.vectorizeInserts = accumulator.vectorizeInserts;
  // vectorizeDeletes removed - Analytics Engine 20 double limit

  // R2
  if (accumulator.r2ClassA > 0) metrics.r2ClassA = accumulator.r2ClassA;
  if (accumulator.r2ClassB > 0) metrics.r2ClassB = accumulator.r2ClassB;

  // Queue
  if (accumulator.queueMessages > 0) metrics.queueMessages = accumulator.queueMessages;

  // Durable Objects
  if (accumulator.doRequests > 0) metrics.doRequests = accumulator.doRequests;

  // DO latency stats (only if we have samples)
  if (accumulator.doRequests > 0 && accumulator.doLatencyMs.length > 0) {
    const sorted = [...accumulator.doLatencyMs].sort((a, b) => a - b);
    // NOTE(review): the average divides doTotalLatencyMs by the sample count —
    // assumes the accumulator keeps the running total and the sample list in
    // sync; confirm against the code that records DO latencies.
    metrics.doAvgLatencyMs = accumulator.doTotalLatencyMs / accumulator.doLatencyMs.length;
    metrics.doMaxLatencyMs = sorted[sorted.length - 1];
    // p99 via nearest-rank on the sorted copy; clamped to index 0 for tiny samples.
    const p99Index = Math.ceil(sorted.length * 0.99) - 1;
    metrics.doP99LatencyMs = sorted[Math.max(0, p99Index)];
  }

  // Workflow
  if (accumulator.workflowInvocations > 0)
    metrics.workflowInvocations = accumulator.workflowInvocations;

  return metrics;
}
|
|
156
|
+
|
|
157
|
+
/**
|
|
158
|
+
* Check if a telemetry message has any data worth reporting.
|
|
159
|
+
*/
|
|
160
|
+
function hasDataToReport(message: TelemetryMessage): boolean {
|
|
161
|
+
const hasMetrics = Object.values(message.metrics).some((v) => typeof v === 'number' && v > 0);
|
|
162
|
+
const hasErrors = (message.error_count ?? 0) > 0;
|
|
163
|
+
return hasMetrics || hasErrors;
|
|
164
|
+
}
|
|
165
|
+
|
|
166
|
+
/**
|
|
167
|
+
* Build telemetry message from context.
|
|
168
|
+
*/
|
|
169
|
+
function buildTelemetryMessage(context: TelemetryContext): TelemetryMessage {
|
|
170
|
+
const { project, category, feature } = parseFeatureId(context.featureId);
|
|
171
|
+
const metrics = accumulatorToMetrics(context.metrics);
|
|
172
|
+
|
|
173
|
+
const message: TelemetryMessage = {
|
|
174
|
+
feature_key: context.featureId,
|
|
175
|
+
project,
|
|
176
|
+
category,
|
|
177
|
+
feature,
|
|
178
|
+
metrics,
|
|
179
|
+
timestamp: Date.now(),
|
|
180
|
+
};
|
|
181
|
+
|
|
182
|
+
// Include error information if present
|
|
183
|
+
if (context.metrics.errorCount > 0) {
|
|
184
|
+
message.error_count = context.metrics.errorCount;
|
|
185
|
+
}
|
|
186
|
+
if (context.metrics.lastErrorCategory) {
|
|
187
|
+
message.error_category = context.metrics.lastErrorCategory;
|
|
188
|
+
}
|
|
189
|
+
if (context.metrics.errorCodes.length > 0) {
|
|
190
|
+
message.error_codes = context.metrics.errorCodes;
|
|
191
|
+
}
|
|
192
|
+
if (context.correlationId) {
|
|
193
|
+
message.correlation_id = context.correlationId;
|
|
194
|
+
}
|
|
195
|
+
|
|
196
|
+
// Include request duration (wall-clock time)
|
|
197
|
+
const durationMs = Date.now() - context.startTime;
|
|
198
|
+
if (durationMs > 0) {
|
|
199
|
+
message.request_duration_ms = durationMs;
|
|
200
|
+
}
|
|
201
|
+
|
|
202
|
+
// Include distributed tracing context
|
|
203
|
+
if (context.traceId) {
|
|
204
|
+
message.trace_id = context.traceId;
|
|
205
|
+
}
|
|
206
|
+
if (context.spanId) {
|
|
207
|
+
message.span_id = context.spanId;
|
|
208
|
+
}
|
|
209
|
+
|
|
210
|
+
// Include external cost if provided
|
|
211
|
+
if (context.externalCostUsd && context.externalCostUsd > 0) {
|
|
212
|
+
message.external_cost_usd = context.externalCostUsd;
|
|
213
|
+
}
|
|
214
|
+
|
|
215
|
+
return message;
|
|
216
|
+
}
|
|
217
|
+
|
|
218
|
+
/**
|
|
219
|
+
* Flush metrics to the telemetry queue.
|
|
220
|
+
*
|
|
221
|
+
* This is called automatically when the request completes.
|
|
222
|
+
* Uses waitUntil if ExecutionContext is available, otherwise awaits directly.
|
|
223
|
+
*
|
|
224
|
+
* @param env - The proxied environment object
|
|
225
|
+
* @returns Promise that resolves when flush is scheduled (not completed)
|
|
226
|
+
*/
|
|
227
|
+
export async function flushMetrics(env: object): Promise<void> {
|
|
228
|
+
const context = getTelemetryContext(env);
|
|
229
|
+
if (!context) {
|
|
230
|
+
getLog().warn('flushMetrics called without telemetry context');
|
|
231
|
+
return;
|
|
232
|
+
}
|
|
233
|
+
|
|
234
|
+
// Build the message
|
|
235
|
+
const message = buildTelemetryMessage(context);
|
|
236
|
+
|
|
237
|
+
// Skip if nothing to report
|
|
238
|
+
if (!hasDataToReport(message)) {
|
|
239
|
+
clearTelemetryContext(env);
|
|
240
|
+
return;
|
|
241
|
+
}
|
|
242
|
+
|
|
243
|
+
// Send to queue - always await to ensure send completes
|
|
244
|
+
// Also use waitUntil if ctx is available as a safety net
|
|
245
|
+
const sendPromise = sendToQueue(context.queue, message);
|
|
246
|
+
if (context.ctx?.waitUntil) {
|
|
247
|
+
context.ctx.waitUntil(sendPromise);
|
|
248
|
+
}
|
|
249
|
+
await sendPromise;
|
|
250
|
+
|
|
251
|
+
// Clean up context
|
|
252
|
+
clearTelemetryContext(env);
|
|
253
|
+
}
|
|
254
|
+
|
|
255
|
+
/**
 * Send telemetry message to queue with error handling.
 * Fails open - errors are logged but don't break the request.
 *
 * @param queue - Telemetry queue binding, or undefined when the worker has no
 *   PLATFORM_TELEMETRY binding configured
 * @param message - Fully built telemetry payload
 */
async function sendToQueue(
  queue: Queue<TelemetryMessage> | undefined,
  message: TelemetryMessage
): Promise<void> {
  if (!queue) {
    // No queue binding - log warning but don't fail
    getLog().warn('No PLATFORM_TELEMETRY queue binding, metrics not sent', undefined, {
      featureKey: message.feature_key,
    });
    return;
  }

  try {
    await queue.send(message);
    getLog().debug('Telemetry sent', {
      featureKey: message.feature_key,
      metrics: message.metrics,
    });
  } catch (error) {
    // Fail open - log error but don't throw
    getLog().error('Failed to send telemetry', error, { featureKey: message.feature_key });
  }
}
|
|
282
|
+
|
|
283
|
+
/**
 * Schedule telemetry flush using waitUntil.
 * This is the preferred method when ExecutionContext is available.
 *
 * The flush runs after the response is returned; failures inside
 * flushMetrics are handled there (fail-open) and will not surface here.
 *
 * @param ctx - ExecutionContext from the worker
 * @param env - The proxied environment object
 */
export function scheduleFlush(ctx: ExecutionContext, env: object): void {
  ctx.waitUntil(flushMetrics(env));
}
|
|
293
|
+
|
|
294
|
+
// =============================================================================
|
|
295
|
+
// DIRECT REPORTING (for use outside proxied context)
|
|
296
|
+
// =============================================================================
|
|
297
|
+
|
|
298
|
+
/**
|
|
299
|
+
* Check if metrics have any non-zero values.
|
|
300
|
+
*/
|
|
301
|
+
function hasMetrics(metrics: FeatureMetrics): boolean {
|
|
302
|
+
return Object.values(metrics).some((v) => typeof v === 'number' && v > 0);
|
|
303
|
+
}
|
|
304
|
+
|
|
305
|
+
/**
|
|
306
|
+
* Report usage metrics directly without proxy tracking.
|
|
307
|
+
* Use this for manual metric reporting in edge cases.
|
|
308
|
+
*
|
|
309
|
+
* @param featureId - Feature identifier (project:category:feature)
|
|
310
|
+
* @param metrics - Metrics to report
|
|
311
|
+
* @param queue - Telemetry queue binding
|
|
312
|
+
* @param ctx - Optional ExecutionContext for waitUntil
|
|
313
|
+
*/
|
|
314
|
+
export async function reportUsage(
|
|
315
|
+
featureId: FeatureId,
|
|
316
|
+
metrics: FeatureMetrics,
|
|
317
|
+
queue: Queue<TelemetryMessage>,
|
|
318
|
+
ctx?: ExecutionContext
|
|
319
|
+
): Promise<void> {
|
|
320
|
+
if (!hasMetrics(metrics)) {
|
|
321
|
+
return;
|
|
322
|
+
}
|
|
323
|
+
|
|
324
|
+
const { project, category, feature } = parseFeatureId(featureId);
|
|
325
|
+
|
|
326
|
+
const message: TelemetryMessage = {
|
|
327
|
+
feature_key: featureId,
|
|
328
|
+
project,
|
|
329
|
+
category,
|
|
330
|
+
feature,
|
|
331
|
+
metrics,
|
|
332
|
+
timestamp: Date.now(),
|
|
333
|
+
};
|
|
334
|
+
|
|
335
|
+
const sendPromise = sendToQueue(queue, message);
|
|
336
|
+
|
|
337
|
+
if (ctx?.waitUntil) {
|
|
338
|
+
ctx.waitUntil(sendPromise);
|
|
339
|
+
} else {
|
|
340
|
+
await sendPromise;
|
|
341
|
+
}
|
|
342
|
+
}
|