@littlebearapps/platform-admin-sdk 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (94) hide show
  1. package/README.md +112 -0
  2. package/dist/index.d.ts +16 -0
  3. package/dist/index.js +89 -0
  4. package/dist/prompts.d.ts +27 -0
  5. package/dist/prompts.js +80 -0
  6. package/dist/scaffold.d.ts +5 -0
  7. package/dist/scaffold.js +65 -0
  8. package/dist/templates.d.ts +16 -0
  9. package/dist/templates.js +131 -0
  10. package/package.json +46 -0
  11. package/templates/full/migrations/006_pattern_discovery.sql +199 -0
  12. package/templates/full/migrations/007_notifications_search.sql +127 -0
  13. package/templates/full/workers/lib/pattern-discovery/ai-prompt.ts +644 -0
  14. package/templates/full/workers/lib/pattern-discovery/clustering.ts +278 -0
  15. package/templates/full/workers/lib/pattern-discovery/shadow-evaluation.ts +603 -0
  16. package/templates/full/workers/lib/pattern-discovery/storage.ts +806 -0
  17. package/templates/full/workers/lib/pattern-discovery/types.ts +159 -0
  18. package/templates/full/workers/lib/pattern-discovery/validation.ts +278 -0
  19. package/templates/full/workers/pattern-discovery.ts +661 -0
  20. package/templates/full/workers/platform-alert-router.ts +1809 -0
  21. package/templates/full/workers/platform-notifications.ts +424 -0
  22. package/templates/full/workers/platform-search.ts +480 -0
  23. package/templates/full/workers/platform-settings.ts +436 -0
  24. package/templates/full/wrangler.alert-router.jsonc.hbs +34 -0
  25. package/templates/full/wrangler.notifications.jsonc.hbs +23 -0
  26. package/templates/full/wrangler.pattern-discovery.jsonc.hbs +33 -0
  27. package/templates/full/wrangler.search.jsonc.hbs +16 -0
  28. package/templates/full/wrangler.settings.jsonc.hbs +23 -0
  29. package/templates/shared/README.md.hbs +69 -0
  30. package/templates/shared/config/budgets.yaml.hbs +72 -0
  31. package/templates/shared/config/services.yaml.hbs +45 -0
  32. package/templates/shared/migrations/001_core_tables.sql +117 -0
  33. package/templates/shared/migrations/002_usage_warehouse.sql +830 -0
  34. package/templates/shared/migrations/003_feature_tracking.sql +250 -0
  35. package/templates/shared/migrations/004_settings_alerts.sql +452 -0
  36. package/templates/shared/migrations/seed.sql.hbs +4 -0
  37. package/templates/shared/package.json.hbs +21 -0
  38. package/templates/shared/scripts/sync-config.ts +242 -0
  39. package/templates/shared/tsconfig.json +12 -0
  40. package/templates/shared/workers/lib/analytics-engine.ts +357 -0
  41. package/templates/shared/workers/lib/billing.ts +293 -0
  42. package/templates/shared/workers/lib/circuit-breaker-middleware.ts +25 -0
  43. package/templates/shared/workers/lib/control.ts +292 -0
  44. package/templates/shared/workers/lib/economics.ts +368 -0
  45. package/templates/shared/workers/lib/metrics.ts +103 -0
  46. package/templates/shared/workers/lib/platform-settings.ts +407 -0
  47. package/templates/shared/workers/lib/shared/allowances.ts +333 -0
  48. package/templates/shared/workers/lib/shared/cloudflare.ts +1362 -0
  49. package/templates/shared/workers/lib/shared/types.ts +58 -0
  50. package/templates/shared/workers/lib/telemetry-sampling.ts +360 -0
  51. package/templates/shared/workers/lib/usage/collectors/example.ts +96 -0
  52. package/templates/shared/workers/lib/usage/collectors/index.ts +128 -0
  53. package/templates/shared/workers/lib/usage/handlers/audit.ts +306 -0
  54. package/templates/shared/workers/lib/usage/handlers/backfill.ts +845 -0
  55. package/templates/shared/workers/lib/usage/handlers/behavioral.ts +429 -0
  56. package/templates/shared/workers/lib/usage/handlers/data-queries.ts +507 -0
  57. package/templates/shared/workers/lib/usage/handlers/dlq-admin.ts +364 -0
  58. package/templates/shared/workers/lib/usage/handlers/health-trends.ts +222 -0
  59. package/templates/shared/workers/lib/usage/handlers/index.ts +35 -0
  60. package/templates/shared/workers/lib/usage/handlers/usage-admin.ts +421 -0
  61. package/templates/shared/workers/lib/usage/handlers/usage-features.ts +1262 -0
  62. package/templates/shared/workers/lib/usage/handlers/usage-metrics.ts +2420 -0
  63. package/templates/shared/workers/lib/usage/handlers/usage-settings.ts +610 -0
  64. package/templates/shared/workers/lib/usage/queue/budget-enforcement.ts +1032 -0
  65. package/templates/shared/workers/lib/usage/queue/cost-budget-enforcement.ts +128 -0
  66. package/templates/shared/workers/lib/usage/queue/cost-calculator.ts +77 -0
  67. package/templates/shared/workers/lib/usage/queue/dlq-handler.ts +161 -0
  68. package/templates/shared/workers/lib/usage/queue/index.ts +19 -0
  69. package/templates/shared/workers/lib/usage/queue/telemetry-processor.ts +790 -0
  70. package/templates/shared/workers/lib/usage/scheduled/anomaly-detection.ts +732 -0
  71. package/templates/shared/workers/lib/usage/scheduled/data-collection.ts +956 -0
  72. package/templates/shared/workers/lib/usage/scheduled/error-digest.ts +343 -0
  73. package/templates/shared/workers/lib/usage/scheduled/index.ts +18 -0
  74. package/templates/shared/workers/lib/usage/scheduled/rollups.ts +1561 -0
  75. package/templates/shared/workers/lib/usage/shared/constants.ts +362 -0
  76. package/templates/shared/workers/lib/usage/shared/index.ts +14 -0
  77. package/templates/shared/workers/lib/usage/shared/types.ts +1066 -0
  78. package/templates/shared/workers/lib/usage/shared/utils.ts +795 -0
  79. package/templates/shared/workers/platform-usage.ts +1915 -0
  80. package/templates/shared/wrangler.usage.jsonc.hbs +58 -0
  81. package/templates/standard/migrations/005_error_collection.sql +162 -0
  82. package/templates/standard/workers/error-collector.ts +2670 -0
  83. package/templates/standard/workers/lib/error-collector/capture.ts +213 -0
  84. package/templates/standard/workers/lib/error-collector/digest.ts +448 -0
  85. package/templates/standard/workers/lib/error-collector/email-health-alerts.ts +262 -0
  86. package/templates/standard/workers/lib/error-collector/fingerprint.ts +258 -0
  87. package/templates/standard/workers/lib/error-collector/gap-alerts.ts +293 -0
  88. package/templates/standard/workers/lib/error-collector/github.ts +329 -0
  89. package/templates/standard/workers/lib/error-collector/types.ts +262 -0
  90. package/templates/standard/workers/lib/sentinel/gap-detection.ts +734 -0
  91. package/templates/standard/workers/lib/shared/slack-alerts.ts +585 -0
  92. package/templates/standard/workers/platform-sentinel.ts +1744 -0
  93. package/templates/standard/wrangler.error-collector.jsonc.hbs +44 -0
  94. package/templates/standard/wrangler.sentinel.jsonc.hbs +45 -0
@@ -0,0 +1,1915 @@
1
+ /**
2
+ * Platform Usage Worker (Data Warehouse)
3
+ *
4
+ * Provides unified Cloudflare account usage metrics with D1-backed storage,
5
+ * adaptive sampling, circuit breaker protection, and anomaly detection.
6
+ *
7
+ * Architecture:
8
+ * - 2-tier time-series rollup: daily (90d) → monthly (forever)
9
+ * - Feature-level SDK telemetry via Analytics Engine (captured by Platform SDK)
10
+ * - Scheduled handler runs hourly at :00 UTC (rollups run at midnight)
11
+ * - Hybrid circuit breaker: soft for this worker, hard 503 for registered projects
12
+ *
13
+ * Endpoints:
14
+ * - GET /usage - Get usage metrics with cost breakdown
15
+ * - GET /usage/costs - Get cost breakdown only (lighter endpoint)
16
+ * - GET /usage/thresholds - Get threshold warnings only
17
+ * - GET /usage/enhanced - Get enhanced usage metrics with sparklines and trends
18
+ * - GET /usage/compare - Get period comparison (task-17.3, 17.4)
19
+ * - GET /usage/workersai - Get Workers AI usage from Analytics Engine (15-min cache)
20
+ * - GET /usage/daily - Get daily cost breakdown for chart/table (task-18)
21
+ * - GET /usage/settings - Get alert threshold configuration (task-17.16)
22
+ * - PUT /usage/settings - Update alert threshold configuration (task-17.16)
23
+ * - GET /usage/settings/verify - Verify all expected settings exist in D1 (task-55)
24
+ * - GET /usage/live - Real-time KV data (circuit breakers, sampling mode) - requires X-API-Key
25
+ * - GET /usage/features - Feature-level usage from Analytics Engine (Phase 4)
26
+ * - GET /usage/features/circuit-breakers - Feature circuit breaker states (Phase 4)
27
+ * - PUT /usage/features/circuit-breakers - Toggle feature circuit breaker (Phase 4)
28
+ * - GET /usage/features/budgets - Feature budgets configuration (Phase 4)
29
+ * - PUT /usage/features/budgets - Update feature budgets configuration (Phase 4)
30
+ * - GET /usage/features/history - Historical feature usage from D1 (Phase 5.2)
31
+ * - GET /usage/features/circuit-breaker-events - Circuit breaker event log from D1 (Phase 5.3)
32
+ * - GET /usage/query - Time-bucketed usage aggregation from Analytics Engine (Dashboard data layer)
33
+ * - GET /usage/health-trends - Project health trends over time (AI Judge Phase 2)
34
+ * - GET /usage/health-trends/latest - Latest health scores summary (AI Judge Phase 2)
35
+ *
36
+ * Query params:
37
+ * - period: '24h' | '7d' | '30d' (default: '30d')
38
+ * - project: 'all' | <your-project-ids> (default: 'all')
39
+ * - compare: 'none' | 'lastMonth' | 'custom' (default: 'none')
40
+ * - startDate: ISO date (YYYY-MM-DD) - for compare=custom
41
+ * - endDate: ISO date (YYYY-MM-DD) - for compare=custom
42
+ * - priorStartDate: ISO date (YYYY-MM-DD) - optional, for compare=custom
43
+ * - priorEndDate: ISO date (YYYY-MM-DD) - optional, for compare=custom
44
+ *
45
+ * Scheduled:
46
+ * - Cron: 0 * * * * (hourly at :00 UTC; daily rollups trigger at midnight)
47
+ * - Collects from Cloudflare GraphQL, Analytics Engine, GitHub Billing
48
+ * - Persists to D1, runs all rollups (daily, feature usage, AI model breakdowns)
49
+ * - SDK telemetry flows via queue handler → Analytics Engine → daily rollup
50
+ */
51
+
52
+ import type {
53
+ KVNamespace,
54
+ ExecutionContext,
55
+ D1Database,
56
+ ScheduledEvent,
57
+ Queue,
58
+ MessageBatch,
59
+ AnalyticsEngineDataset,
60
+ Fetcher,
61
+ } from '@cloudflare/workers-types';
62
+ import {
63
+ withFeatureBudget,
64
+ createLoggerFromEnv,
65
+ createLoggerFromRequest,
66
+ createTraceContext,
67
+ health,
68
+ HEARTBEAT_HEALTH,
69
+ type TelemetryMessage,
70
+ type FeatureMetrics,
71
+ type ErrorCategory,
72
+ type Logger,
73
+ } from '@littlebearapps/platform-consumer-sdk';
74
+ import { CB_STATUS, type CircuitBreakerStatusValue } from './lib/circuit-breaker-middleware';
75
+ import { HARD_LIMIT_MULTIPLIER } from './lib/usage/queue/budget-enforcement';
76
+ import { METRIC_FIELDS } from '@littlebearapps/platform-consumer-sdk';
77
+ import {
78
+ calculateBillingPeriod,
79
+ calculateBillableUsage,
80
+ prorateAllowance,
81
+ getDefaultBillingSettings,
82
+ type BillingSettings,
83
+ type BillingPeriod,
84
+ } from './lib/billing';
85
+ import {
86
+ getPlatformSettings as getPlatformSettingsFromLib,
87
+ getSetting,
88
+ getProjectSetting,
89
+ getUtilizationStatus,
90
+ DEFAULT_PLATFORM_SETTINGS,
91
+ SETTING_KEY_MAP,
92
+ EXPECTED_SETTINGS_KEYS,
93
+ type PlatformSettings,
94
+ type SettingsEnv,
95
+ } from './lib/platform-settings';
96
+ import {
97
+ CloudflareGraphQL,
98
+ type TimePeriod,
99
+ type DateRange,
100
+ type CompareMode,
101
+ type AccountUsage,
102
+ calculateMonthlyCosts,
103
+ calculateProjectCosts,
104
+ calculateDailyCosts,
105
+ analyseThresholds,
106
+ identifyProject,
107
+ formatCurrency,
108
+ DEFAULT_ALERT_THRESHOLDS,
109
+ mergeThresholds,
110
+ type CostBreakdown,
111
+ type ProjectCostBreakdown,
112
+ type ThresholdAnalysis,
113
+ type SparklineData,
114
+ type WorkersErrorBreakdown,
115
+ type QueuesMetrics,
116
+ type CacheAnalytics,
117
+ type PeriodComparison,
118
+ type AlertThresholds,
119
+ type ServiceThreshold,
120
+ type WorkersAISummary,
121
+ type AIGatewaySummary,
122
+ type DailyCostData,
123
+ type DailyUsageMetrics,
124
+ // Project registry (D1-backed)
125
+ getProjects,
126
+ type Project,
127
+ type ResourceType,
128
+ } from './lib/shared/cloudflare';
129
+ import {
130
+ CF_SIMPLE_ALLOWANCES,
131
+ type SimpleAllowanceType,
132
+ } from './lib/shared/allowances';
133
+ import {
134
+ calculateHourlyCosts,
135
+ calculateDailyBillableCosts,
136
+ type HourlyUsageMetrics,
137
+ type AccountDailyUsage,
138
+ type DailyBillableCostBreakdown,
139
+ PRICING_TIERS,
140
+ PAID_ALLOWANCES,
141
+ HOURS_PER_MONTH,
142
+ } from '@littlebearapps/platform-consumer-sdk';
143
+ import {
144
+ getDailyUsageFromAnalyticsEngine,
145
+ queryUsageByTimeBucket,
146
+ type TimeBucketedUsage,
147
+ type TimeBucketQueryParams,
148
+ } from './lib/analytics-engine';
149
+ import {
150
+ getPIDState,
151
+ savePIDState,
152
+ computePID,
153
+ calculateUtilisation,
154
+ shouldUpdatePID,
155
+ formatThrottleRate,
156
+ type PIDState,
157
+ } from './lib/control';
158
+ import {
159
+ getReservoirState,
160
+ saveReservoirState,
161
+ addSample,
162
+ getPercentiles,
163
+ formatPercentiles,
164
+ type ReservoirState,
165
+ } from './lib/telemetry-sampling';
166
+ import { calculateBCU, formatBCUResult, type BCUResult } from './lib/economics';
167
+ import { pingHeartbeat } from '@littlebearapps/platform-consumer-sdk';
168
+
169
+ // =============================================================================
170
+ // SHARED USAGE MODULES (Types, Constants, Utilities)
171
+ // =============================================================================
172
+ import {
173
+ // Types
174
+ type Env,
175
+ type DailyLimits,
176
+ SamplingMode,
177
+ type PreviousHourMetrics,
178
+ type MetricDeltas,
179
+ type PlatformPricing,
180
+ type UsageResponse,
181
+ type EnhancedUsageResponse,
182
+ type ComparisonResponse,
183
+ type SettingsResponse,
184
+ type ProjectedBurn,
185
+ type LiveUsageResponse,
186
+ type FeatureUsageData,
187
+ type WorkersAIResponse,
188
+ type DailyCostResponse,
189
+ type ServiceUtilizationStatus,
190
+ type ResourceMetricData,
191
+ type ProviderHealthData,
192
+ type ProjectUtilizationData,
193
+ type GitHubUsageResponse,
194
+ type BurnRateResponse,
195
+ type BudgetThresholds,
196
+ type RollingStats,
197
+ type AnomalyRecord,
198
+ type AnomaliesResponse,
199
+ type GitHubUsageItem,
200
+ type GitHubPlanInfo,
201
+ type GitHubBillingData,
202
+ type GitHubPlanInclusions,
203
+ type AnthropicUsageData,
204
+ type OpenAIUsageData,
205
+ type ResendUsageData,
206
+ type ApifyUsageData,
207
+ type ErrorAlertPayload,
208
+ type FeatureBatchState,
209
+ type VectorizeAttribution,
210
+ type ProjectLookupCache,
211
+ // Constants
212
+ CB_KEYS,
213
+ FEATURE_KV_KEYS,
214
+ SETTINGS_KEY,
215
+ METRIC_TO_BUDGET_KEY,
216
+ FEATURE_METRIC_FIELDS,
217
+ RESOURCE_TYPE_MAP,
218
+ DEFAULT_PRICING,
219
+ DEFAULT_BUDGET_THRESHOLDS,
220
+ CF_OVERAGE_PRICING,
221
+ FALLBACK_PROJECT_CONFIGS,
222
+ ERROR_RATE_THRESHOLDS,
223
+ KNOWN_DATASETS,
224
+ QUERIED_DATASETS,
225
+ EXPECTED_USAGE_SETTINGS,
226
+ BILLING_SETTINGS_CACHE_TTL_MS,
227
+ MAX_HOURLY_DELTAS,
228
+ // Utilities
229
+ getCacheKey,
230
+ parseQueryParams,
231
+ parseQueryParamsWithRegistry,
232
+ getValidProjects,
233
+ jsonResponse,
234
+ buildProjectLookupCache,
235
+ identifyProjectWithCache,
236
+ filterByProject,
237
+ filterByProjectWithRegistry,
238
+ attributeVectorizeByProject,
239
+ calculateSummary,
240
+ calcTrend,
241
+ calculateDelta,
242
+ loadPreviousHourMetrics,
243
+ savePreviousHourMetrics,
244
+ getQueueProject,
245
+ getWorkflowProject,
246
+ loadPricing,
247
+ resetPricingCache,
248
+ fetchBillingSettings,
249
+ resetBillingSettingsCache,
250
+ getPlatformSettings,
251
+ getBudgetThresholds,
252
+ determineSamplingMode,
253
+ getServiceUtilizationStatus,
254
+ shouldRunThisHour,
255
+ generateId,
256
+ getCurrentHour,
257
+ getTodayDate,
258
+ validateApiKey,
259
+ fetchWithRetry,
260
+ } from './lib/usage/shared';
261
+
262
+ // =============================================================================
263
+ // HANDLER MODULES (HTTP endpoint handlers)
264
+ // =============================================================================
265
+ import {
266
+ // Data query functions
267
+ getCurrentPricingVersionId,
268
+ resetPricingVersionCache,
269
+ queryD1UsageData,
270
+ queryD1DailyCosts,
271
+ calculateProjectedBurn,
272
+ queryAIGatewayMetrics,
273
+ // Usage metrics handlers
274
+ handleUsage,
275
+ handleCosts,
276
+ handleThresholds,
277
+ handleEnhanced,
278
+ handleCompare,
279
+ handleDaily,
280
+ handleStatus,
281
+ handleUtilization,
282
+ handleProjects,
283
+ handleAnomalies,
284
+ // Feature handlers
285
+ handleFeatures,
286
+ handleWorkersAI,
287
+ handleUsageQuery,
288
+ handleGetFeatureCircuitBreakers,
289
+ handlePutFeatureCircuitBreakers,
290
+ handleGetCircuitBreakerEvents,
291
+ handleGetFeatureBudgets,
292
+ handlePutFeatureBudgets,
293
+ handleGetFeatureHistory,
294
+ // Settings handlers
295
+ handleGetSettings,
296
+ handlePutSettings,
297
+ handleSettingsVerify,
298
+ handleCircuitBreakerStatus,
299
+ handleLiveUsage,
300
+ // Admin handlers
301
+ handleResetCircuitBreaker,
302
+ handleBackfill,
303
+ // DLQ admin handlers
304
+ handleListDLQ,
305
+ handleDLQStats,
306
+ handleReplayDLQ,
307
+ handleDiscardDLQ,
308
+ handleReplayAllDLQ,
309
+ // Health trends handlers (Phase 2 AI Judge)
310
+ handleGetHealthTrends,
311
+ handleGetLatestHealthTrends,
312
+ // Gap detection and backfill handlers
313
+ handleGapsStatus,
314
+ handleGapsHistory,
315
+ handleGapsBackfill,
316
+ handleBackfillHistory,
317
+ handleProjectsHealth,
318
+ // Audit handlers (Phase 2 Usage Capture Audit)
319
+ handleGetAudit,
320
+ handleGetAuditHistory,
321
+ handleGetAttribution,
322
+ handleGetFeatureCoverage,
323
+ // Behavioral analysis handlers
324
+ handleGetBehavioral,
325
+ handleGetHotspots,
326
+ handleGetRegressions,
327
+ handleAcknowledgeRegression,
328
+ } from './lib/usage/handlers';
329
+
330
+ // =============================================================================
331
+ // SCHEDULED MODULES (Cron-triggered data collection and rollups)
332
+ // =============================================================================
333
+ import {
334
+ // Data collection
335
+ persistHourlySnapshot,
336
+ persistResourceUsageSnapshots,
337
+ collectExternalMetrics,
338
+ type ExternalMetrics,
339
+ persistThirdPartyUsage,
340
+ validateCloudflareToken,
341
+ // Rollups
342
+ invalidateDailyCache,
343
+ runDailyRollup,
344
+ runFeatureUsageDailyRollup,
345
+ runMonthlyRollup,
346
+ cleanupOldData,
347
+ calculateUsageVsAllowancePercentages,
348
+ persistWorkersAIModelBreakdown,
349
+ persistAIGatewayModelBreakdown,
350
+ persistFeatureAIModelUsage,
351
+ runWorkersAIModelDailyRollup,
352
+ runAIGatewayModelDailyRollup,
353
+ backfillMissingDays,
354
+ // Anomaly detection
355
+ calculate7DayRollingStats,
356
+ detectAnomalies,
357
+ detectHourlyD1WriteAnomalies,
358
+ discoverAndUpdateDatasetRegistry,
359
+ // Error digest
360
+ checkAndAlertErrors,
361
+ sendHourlyErrorDigest,
362
+ sendDailyErrorSummary,
363
+ cleanupOldErrorEvents,
364
+ } from './lib/usage/scheduled';
365
+
366
+ // =============================================================================
367
+ // QUEUE MODULES (Telemetry processing and budget enforcement)
368
+ // =============================================================================
369
+ import {
370
+ // Queue consumer
371
+ handleQueue,
372
+ // DLQ handler
373
+ handleDLQ,
374
+ // Heartbeat handling
375
+ handleHeartbeat,
376
+ // Intelligent degradation
377
+ processIntelligentDegradation,
378
+ // Budget enforcement
379
+ checkAndUpdateBudgetStatus,
380
+ checkMonthlyBudgets,
381
+ checkAndTripCircuitBreakers,
382
+ determineCircuitBreakerStatus,
383
+ logCircuitBreakerEvent,
384
+ sendSlackAlert,
385
+ // D1/KV tracking
386
+ getD1WriteCount,
387
+ incrementD1WriteCount,
388
+ getDOGbSecondsCount,
389
+ setDOGbSecondsCount,
390
+ getDOGbSecondsThreshold,
391
+ } from './lib/usage/queue';
392
+
393
+ // Note: Data access functions imported from ./lib/usage/handlers (see imports above)
394
+
395
+ // =============================================================================
396
+ // HTTP ENDPOINT HANDLERS
397
+ // =============================================================================
398
+ // All handler functions are imported from ./lib/usage/handlers modules.
399
+ // See imports above for: handleUsage, handleCosts, handleThresholds, etc.
400
+ // =============================================================================
401
+
402
+ // =============================================================================
403
+ // RETRY HELPERS
404
+ // =============================================================================
405
+
406
+ /**
407
+ * Sleep for specified milliseconds
408
+ */
409
+ function sleep(ms: number): Promise<void> {
410
+ return new Promise((resolve) => setTimeout(resolve, ms));
411
+ }
412
+
413
+ /**
414
+ * Collect Cloudflare usage data with exponential backoff retry.
415
+ * F1 Fix: Prevents data gaps from transient GraphQL failures.
416
+ *
417
+ * Retry strategy: 2s, 4s, 8s (per Cloudflare recommendations)
418
+ * Rate limit: 300 queries/5 min - backoff helps stay within limits
419
+ *
420
+ * @param graphql - CloudflareGraphQL client instance
421
+ * @param log - Logger for tracking retry attempts
422
+ * @param maxRetries - Maximum number of retry attempts (default: 3)
423
+ * @returns Usage data from GraphQL
424
+ * @throws Error if all retries fail
425
+ */
426
+ async function collectWithRetry(
427
+ graphql: CloudflareGraphQL,
428
+ log: Logger,
429
+ maxRetries = 3
430
+ ): Promise<AccountUsage> {
431
+ let lastError: Error | null = null;
432
+
433
+ for (let attempt = 1; attempt <= maxRetries; attempt++) {
434
+ try {
435
+ log.info('GraphQL collection attempt', { attempt, maxRetries });
436
+ const usage = await graphql.getAllMetrics('24h');
437
+ if (attempt > 1) {
438
+ log.info('GraphQL collection succeeded after retry', { attempt });
439
+ }
440
+ return usage;
441
+ } catch (error) {
442
+ lastError = error instanceof Error ? error : new Error(String(error));
443
+ log.warn('GraphQL collection failed', {
444
+ attempt,
445
+ maxRetries,
446
+ error: lastError.message,
447
+ });
448
+
449
+ if (attempt < maxRetries) {
450
+ const delayMs = Math.pow(2, attempt) * 1000; // 2s, 4s, 8s
451
+ log.info('Retrying after delay', { delayMs, nextAttempt: attempt + 1 });
452
+ await sleep(delayMs);
453
+ }
454
+ }
455
+ }
456
+
457
+ throw lastError ?? new Error('GraphQL collection failed after all retries');
458
+ }
459
+
460
+ // =============================================================================
461
+ // SCHEDULED HANDLER
462
+ // =============================================================================
463
+
464
+ /**
465
+ * Scheduled handler - runs hourly at :00.
466
+ * Collects Cloudflare usage data, persists to D1, runs rollups at midnight.
467
+ */
468
+ async function handleScheduled(
469
+ event: ScheduledEvent,
470
+ env: Env,
471
+ ctx: ExecutionContext
472
+ ): Promise<void> {
473
+ const log = createLoggerFromEnv(env, 'platform-usage', 'platform:usage:scheduled');
474
+ const startTime = Date.now();
475
+ const currentHour = new Date(event.scheduledTime).getUTCHours();
476
+ const snapshotHour = getCurrentHour();
477
+ const today = getTodayDate();
478
+
479
+ log.info('Starting data collection', { snapshotHour, hour: currentHour });
480
+
481
+ // Gatus heartbeat is pinged on success/fail only (no /start support)
482
+
483
+ // Wrap env with Platform SDK for automatic metric tracking
484
+ // Note: platform:usage:scheduled tracks the hourly data collection job
485
+ // This gives visibility into the platform-usage worker's own resource consumption
486
+
487
+ const trackedEnv = withFeatureBudget(env, 'platform:usage:scheduled', {
488
+ ctx,
489
+ cacheKv: env.PLATFORM_CACHE as any, // Type assertion for KVNamespace compatibility
490
+ telemetryQueue: env.PLATFORM_TELEMETRY,
491
+ checkCircuitBreaker: false, // Don't block scheduled jobs - this is the control plane
492
+ });
493
+
494
+ // 1. Check global stop flag (use raw env for circuit breaker state)
495
+ const globalStop = await env.PLATFORM_CACHE.get(CB_KEYS.GLOBAL_STOP);
496
+ if (globalStop === 'true') {
497
+ log.info('Global stop flag is set, skipping collection');
498
+ return;
499
+ }
500
+
501
+ // 2. Determine sampling mode (use trackedEnv for D1 operations inside)
502
+ const previousMode = await env.PLATFORM_CACHE.get(CB_KEYS.USAGE_SAMPLING_MODE);
503
+ const samplingMode = await determineSamplingMode(trackedEnv);
504
+ const samplingModeStr = SamplingMode[samplingMode];
505
+
506
+ // Log sampling mode change
507
+ if (previousMode && previousMode !== samplingModeStr) {
508
+ log.info('Sampling mode changed', { previousMode, newMode: samplingModeStr });
509
+ await logCircuitBreakerEvent(
510
+ trackedEnv,
511
+ previousMode > samplingModeStr ? 'sample_restore' : 'sample_reduce',
512
+ 'platform-usage',
513
+ `Sampling mode changed from ${previousMode} to ${samplingModeStr}`,
514
+ await getD1WriteCount(trackedEnv),
515
+ samplingModeStr,
516
+ previousMode
517
+ );
518
+ }
519
+
520
+ // Update sampling mode in KV (circuit breaker state - not tracked)
521
+ await env.PLATFORM_CACHE.put(CB_KEYS.USAGE_SAMPLING_MODE, samplingModeStr);
522
+
523
+ // 3. Check if we should run this hour
524
+ if (!shouldRunThisHour(samplingMode, currentHour)) {
525
+ log.info('Skipping collection', { mode: samplingModeStr, hour: currentHour });
526
+ return;
527
+ }
528
+
529
+ let totalD1Writes = 0;
530
+
531
+ try {
532
+ // 3.5 Validate API token before making GraphQL calls
533
+ const accountName = await validateCloudflareToken(trackedEnv);
534
+ if (!accountName) {
535
+ log.error('Cloudflare API token validation failed - aborting collection');
536
+ return;
537
+ }
538
+
539
+ // 4. Collect Cloudflare usage data with retry logic (F1 Fix)
540
+ // CloudflareGraphQL constructor takes env object with CLOUDFLARE_ACCOUNT_ID and CLOUDFLARE_API_TOKEN
541
+ const graphql = new CloudflareGraphQL({
542
+ CLOUDFLARE_ACCOUNT_ID: trackedEnv.CLOUDFLARE_ACCOUNT_ID,
543
+ CLOUDFLARE_API_TOKEN: trackedEnv.CLOUDFLARE_API_TOKEN,
544
+ });
545
+ const usage = await collectWithRetry(graphql, log);
546
+
547
+ // Load previous hour's cumulative metrics for delta calculation
548
+ const previousMetrics = await loadPreviousHourMetrics(trackedEnv);
549
+ if (previousMetrics) {
550
+ log.info('Loaded previous metrics, calculating deltas', {
551
+ previousHour: previousMetrics.snapshotHour,
552
+ });
553
+ } else {
554
+ log.info('No previous metrics found - first collection, will use raw cumulative values');
555
+ }
556
+
557
+ // DEBUG: Log GraphQL results to diagnose zero metrics issue
558
+ log.info('GraphQL results', {
559
+ workers: usage.workers.length,
560
+ d1: usage.d1.length,
561
+ kv: usage.kv.length,
562
+ r2: usage.r2.length,
563
+ doRequests: usage.durableObjects.requests,
564
+ vectorize: usage.vectorize.length,
565
+ aiGateway: usage.aiGateway.length,
566
+ pages: usage.pages.length,
567
+ });
568
+ if (usage.workers.length > 0) {
569
+ const sample = usage.workers[0];
570
+ log.info('Sample Worker', {
571
+ scriptName: sample.scriptName,
572
+ requests: sample.requests,
573
+ cpuTimeMs: sample.cpuTimeMs,
574
+ });
575
+ } else {
576
+ log.warn('No workers data returned from GraphQL API');
577
+ }
578
+
579
+ // Collect Workflows metrics separately (not part of AccountUsage)
580
+ const workflowsData = await graphql.getWorkflowsMetrics('24h');
581
+ const workflows = {
582
+ executions: workflowsData.totalExecutions,
583
+ successes: workflowsData.totalSuccesses,
584
+ failures: workflowsData.totalFailures,
585
+ wallTimeMs: workflowsData.totalWallTimeMs,
586
+ cpuTimeMs: workflowsData.totalCpuTimeMs,
587
+ };
588
+
589
+ // Collect Queues metrics separately (not part of AccountUsage)
590
+ // Uses queueConsumerMetricsAdaptiveGroups + queueMessageOperationsAdaptiveGroups
591
+ const queuesData = await graphql.getQueuesMetrics('24h');
592
+ const totalMessagesProduced = queuesData.reduce((sum, q) => sum + q.messagesProduced, 0);
593
+ const totalMessagesConsumed = queuesData.reduce((sum, q) => sum + q.messagesConsumed, 0);
594
+ const queues = {
595
+ messagesProduced: totalMessagesProduced,
596
+ messagesConsumed: totalMessagesConsumed,
597
+ };
598
+ log.info('Queues', {
599
+ queuesCount: queuesData.length,
600
+ produced: totalMessagesProduced,
601
+ consumed: totalMessagesConsumed,
602
+ });
603
+
604
+ // Collect Workers AI model breakdown for detailed tracking
605
+ const workersAIData = await graphql.getWorkersAIMetrics('24h');
606
+ log.info('Workers AI getWorkersAIMetrics returned', {
607
+ metricsLength: workersAIData.metrics.length,
608
+ totalRequests: workersAIData.totalRequests,
609
+ });
610
+ if (workersAIData.metrics.length > 0) {
611
+ totalD1Writes += await persistWorkersAIModelBreakdown(
612
+ trackedEnv,
613
+ snapshotHour,
614
+ workersAIData.metrics
615
+ );
616
+ log.info('Persisted Workers AI model entries', { count: workersAIData.metrics.length });
617
+ }
618
+
619
+ // Collect Workers AI neurons/tokens via GraphQL (aiInferenceAdaptive dataset)
620
+ // This provides accurate neuron counts for billing - much better than Analytics Engine estimates
621
+ const hourAgo = new Date(Date.now() - 60 * 60 * 1000);
622
+ const now = new Date();
623
+
624
+ // Workers AI Neurons via GraphQL (aiInferenceAdaptive dataset)
625
+ // LIMITATION: GraphQL only provides account-level totals with byModel breakdown.
626
+ // There is NO per-script or per-project dimension available from the Cloudflare GraphQL API.
627
+ // All Workers AI neurons are therefore tracked as '_unattributed' in hourly_usage_snapshots.
628
+ // Future option: Reconcile with Platform SDK telemetry which tracks aiNeurons per feature.
629
+ const workersAINeuronData = await graphql.getWorkersAINeuronsGraphQL({
630
+ startDate: hourAgo.toISOString().split('T')[0],
631
+ endDate: now.toISOString().split('T')[0],
632
+ });
633
+ log.info('Workers AI neurons', {
634
+ totalNeurons: workersAINeuronData.totalNeurons,
635
+ inputTokens: workersAINeuronData.totalInputTokens,
636
+ outputTokens: workersAINeuronData.totalOutputTokens,
637
+ modelCount: workersAINeuronData.byModel.length,
638
+ });
639
+
640
+ // Fallback: If project-reported metrics are empty but GraphQL has model data, use GraphQL data
641
+ // This ensures workersai_model_usage table gets populated even if projects aren't writing workersai.cost
642
+ if (workersAIData.metrics.length === 0 && workersAINeuronData.byModel.length > 0) {
643
+ log.info('Using GraphQL neurons fallback for Workers AI model breakdown', {
644
+ modelCount: workersAINeuronData.byModel.length,
645
+ });
646
+ // Convert GraphQL neuron data to the format expected by persistWorkersAIModelBreakdown
647
+ const fallbackMetrics = workersAINeuronData.byModel.map((m) => ({
648
+ project: 'all', // GraphQL doesn't provide per-project breakdown
649
+ model: m.modelId, // modelId is the correct property name from GraphQL response
650
+ requests: m.requestCount,
651
+ inputTokens: m.inputTokens,
652
+ outputTokens: m.outputTokens,
653
+ costUsd: m.neurons * 0.000011, // $0.011 per 1000 neurons
654
+ isEstimated: true, // Mark as estimated since we're deriving from neurons
655
+ }));
656
+ totalD1Writes += await persistWorkersAIModelBreakdown(
657
+ trackedEnv,
658
+ snapshotHour,
659
+ fallbackMetrics
660
+ );
661
+ log.info('Persisted Workers AI model entries from GraphQL fallback', {
662
+ count: fallbackMetrics.length,
663
+ });
664
+ }
665
+
666
+ // Collect Vectorize query metrics via GraphQL (vectorizeV2QueriesAdaptiveGroups dataset)
667
+ // This provides actual query counts - previously was hardcoded to 0
668
+ const vectorizeQueryData = await graphql.getVectorizeQueriesGraphQL({
669
+ startDate: hourAgo.toISOString().split('T')[0],
670
+ endDate: now.toISOString().split('T')[0],
671
+ });
672
+ log.info('Vectorize queries', {
673
+ dimensions: vectorizeQueryData.totalQueriedDimensions,
674
+ vectors: vectorizeQueryData.totalServedVectors,
675
+ indexes: vectorizeQueryData.byIndex.length,
676
+ });
677
+
678
+ // Collect Vectorize storage metrics via GraphQL (vectorizeV2StorageAdaptiveGroups dataset)
679
+ const vectorizeStorageData = await graphql.getVectorizeStorageGraphQL({
680
+ startDate: hourAgo.toISOString().split('T')[0],
681
+ endDate: now.toISOString().split('T')[0],
682
+ });
683
+ log.info('Vectorize storage', {
684
+ dimensions: vectorizeStorageData.totalStoredDimensions,
685
+ vectors: vectorizeStorageData.totalVectorCount,
686
+ indexes: vectorizeStorageData.byIndex.length,
687
+ });
688
+
689
+ // Attribute Vectorize queries to projects using D1 registry
690
+ // This enables tracking of Vectorize usage per project instead of just account-level totals
691
+ const projectCache = await buildProjectLookupCache(trackedEnv);
692
+ const vectorizeAttribution = attributeVectorizeByProject(
693
+ vectorizeQueryData.byIndex,
694
+ projectCache,
695
+ vectorizeQueryData.totalQueriedDimensions
696
+ );
697
+ log.info('Vectorize attribution', {
698
+ projectsAttributed: vectorizeAttribution.byProject.size,
699
+ unattributed: vectorizeAttribution.unattributed,
700
+ });
701
+ // Log per-project breakdown for debugging
702
+ for (const [projectId, dimensions] of vectorizeAttribution.byProject) {
703
+ log.info('Project vectorize dimensions', { projectId, dimensions });
704
+ }
705
+ if (vectorizeAttribution.unattributed > 0) {
706
+ log.info('Unattributed vectorize dimensions', {
707
+ dimensions: vectorizeAttribution.unattributed,
708
+ });
709
+ }
710
+
711
+ // Build AI metrics object for persistHourlySnapshot
712
+ const aiMetrics = {
713
+ workersAINeurons: workersAINeuronData.totalNeurons,
714
+ workersAIRequests: workersAINeuronData.byModel.reduce((sum, m) => sum + m.requestCount, 0),
715
+ vectorizeQueries: vectorizeQueryData.totalQueriedDimensions, // Using dimensions as proxy for query count
716
+ vectorizeVectorsQueried: vectorizeQueryData.totalServedVectors,
717
+ };
718
+
719
+ // Collect AI Gateway model breakdown for detailed tracking
720
+ log.info('AI Gateway gateways count', { count: usage.aiGateway.length });
721
+ for (const gateway of usage.aiGateway) {
722
+ log.info('AI Gateway', {
723
+ gatewayId: gateway.gatewayId,
724
+ totalRequests: gateway.totalRequests,
725
+ byModelLength: gateway.byModel?.length ?? 'undefined',
726
+ });
727
+ if (gateway.byModel && gateway.byModel.length > 0) {
728
+ totalD1Writes += await persistAIGatewayModelBreakdown(
729
+ env,
730
+ snapshotHour,
731
+ gateway.gatewayId,
732
+ gateway.byModel
733
+ );
734
+ log.info('Persisted AI Gateway model entries', {
735
+ gatewayId: gateway.gatewayId,
736
+ count: gateway.byModel.length,
737
+ });
738
+ } else {
739
+ log.warn('AI Gateway has no model breakdown data', { gatewayId: gateway.gatewayId });
740
+ }
741
+ }
742
+
743
+ // Calculate costs (include queues data for cost calculation)
744
+ // Reserved for future cost tracking feature
745
+ const _costs = calculateMonthlyCosts({ ...usage, queues: queuesData });
746
+
747
+ // Build current cumulative metrics for delta calculation
748
+ // These are the raw cumulative values from GraphQL (daily totals)
749
+ const workersRequests = usage.workers.reduce((sum, w) => sum + w.requests, 0);
750
+ const workersErrors = usage.workers.reduce((sum, w) => sum + w.errors, 0);
751
+ const workersCpuTimeMs = usage.workers.reduce((sum, w) => sum + w.cpuTimeMs, 0);
752
+ const d1RowsRead = usage.d1.reduce((sum, d) => sum + d.rowsRead, 0);
753
+ const d1RowsWritten = usage.d1.reduce((sum, d) => sum + d.rowsWritten, 0);
754
+ const kvReads = usage.kv.reduce((sum, k) => sum + k.reads, 0);
755
+ const kvWrites = usage.kv.reduce((sum, k) => sum + k.writes, 0);
756
+ const kvDeletes = usage.kv.reduce((sum, k) => sum + k.deletes, 0);
757
+ const kvLists = usage.kv.reduce((sum, k) => sum + k.lists, 0);
758
+ const r2ClassAOps = usage.r2.reduce((sum, r) => sum + r.classAOperations, 0);
759
+ const r2ClassBOps = usage.r2.reduce((sum, r) => sum + r.classBOperations, 0);
760
+ const r2EgressBytes = usage.r2.reduce((sum, r) => sum + r.egressBytes, 0);
761
+ const aiGatewayRequests = usage.aiGateway.reduce((sum, a) => sum + a.totalRequests, 0);
762
+ const aiGatewayTokensIn = 0; // Not available separately
763
+ const aiGatewayTokensOut = usage.aiGateway.reduce((sum, a) => sum + a.totalTokens, 0);
764
+ const aiGatewayCached = usage.aiGateway.reduce((sum, a) => sum + a.cachedRequests, 0);
765
+ const pagesDeployments = usage.pages.reduce((sum, p) => sum + p.totalBuilds, 0);
766
+
767
+ // Current cumulative metrics object (to be saved for next delta calculation)
768
+ const currentCumulativeMetrics: PreviousHourMetrics = {
769
+ snapshotHour,
770
+ timestamp: Math.floor(Date.now() / 1000),
771
+ do: {
772
+ requests: usage.durableObjects.requests,
773
+ gbSeconds: usage.durableObjects.gbSeconds,
774
+ storageReadUnits: usage.durableObjects.storageReadUnits,
775
+ storageWriteUnits: usage.durableObjects.storageWriteUnits,
776
+ storageDeleteUnits: usage.durableObjects.storageDeleteUnits,
777
+ },
778
+ workersAI: {
779
+ neurons: aiMetrics.workersAINeurons,
780
+ requests: aiMetrics.workersAIRequests,
781
+ },
782
+ vectorize: {
783
+ queries: aiMetrics.vectorizeQueries,
784
+ },
785
+ queues: {
786
+ produced: totalMessagesProduced,
787
+ consumed: totalMessagesConsumed,
788
+ },
789
+ workflows: {
790
+ executions: workflowsData.totalExecutions,
791
+ successes: workflowsData.totalSuccesses,
792
+ failures: workflowsData.totalFailures,
793
+ wallTimeMs: workflowsData.totalWallTimeMs,
794
+ cpuTimeMs: workflowsData.totalCpuTimeMs,
795
+ },
796
+ workers: {
797
+ requests: workersRequests,
798
+ errors: workersErrors,
799
+ cpuTimeMs: workersCpuTimeMs,
800
+ },
801
+ d1: {
802
+ rowsRead: d1RowsRead,
803
+ rowsWritten: d1RowsWritten,
804
+ },
805
+ kv: {
806
+ reads: kvReads,
807
+ writes: kvWrites,
808
+ deletes: kvDeletes,
809
+ lists: kvLists,
810
+ },
811
+ r2: {
812
+ classAOps: r2ClassAOps,
813
+ classBOps: r2ClassBOps,
814
+ egressBytes: r2EgressBytes,
815
+ },
816
+ aiGateway: {
817
+ requests: aiGatewayRequests,
818
+ tokensIn: aiGatewayTokensIn,
819
+ tokensOut: aiGatewayTokensOut,
820
+ cached: aiGatewayCached,
821
+ },
822
+ pages: {
823
+ deployments: pagesDeployments,
824
+ bandwidthBytes: 0, // Not available from PagesMetrics
825
+ },
826
+ };
827
+
828
+ // Calculate deltas (current - previous, or current if no previous)
829
+ // MAX_HOURLY_DELTAS caps prevent cumulative values being stored as hourly deltas
830
+ // when the KV key for previous metrics expires or is missing.
831
+ const deltas: MetricDeltas = {
832
+ do: {
833
+ requests: calculateDelta(usage.durableObjects.requests, previousMetrics?.do?.requests, MAX_HOURLY_DELTAS.do_requests),
834
+ gbSeconds: calculateDelta(usage.durableObjects.gbSeconds, previousMetrics?.do?.gbSeconds, MAX_HOURLY_DELTAS.do_gb_seconds),
835
+ storageReadUnits: calculateDelta(
836
+ usage.durableObjects.storageReadUnits,
837
+ previousMetrics?.do?.storageReadUnits
838
+ ),
839
+ storageWriteUnits: calculateDelta(
840
+ usage.durableObjects.storageWriteUnits,
841
+ previousMetrics?.do?.storageWriteUnits
842
+ ),
843
+ storageDeleteUnits: calculateDelta(
844
+ usage.durableObjects.storageDeleteUnits,
845
+ previousMetrics?.do?.storageDeleteUnits
846
+ ),
847
+ },
848
+ workersAI: {
849
+ neurons: calculateDelta(aiMetrics.workersAINeurons, previousMetrics?.workersAI?.neurons, MAX_HOURLY_DELTAS.ai_neurons),
850
+ requests: calculateDelta(aiMetrics.workersAIRequests, previousMetrics?.workersAI?.requests, MAX_HOURLY_DELTAS.ai_requests),
851
+ },
852
+ vectorize: {
853
+ queries: calculateDelta(aiMetrics.vectorizeQueries, previousMetrics?.vectorize?.queries, MAX_HOURLY_DELTAS.vectorize_queries),
854
+ },
855
+ queues: {
856
+ produced: calculateDelta(totalMessagesProduced, previousMetrics?.queues?.produced, MAX_HOURLY_DELTAS.queue_produced),
857
+ consumed: calculateDelta(totalMessagesConsumed, previousMetrics?.queues?.consumed, MAX_HOURLY_DELTAS.queue_consumed),
858
+ },
859
+ workflows: {
860
+ executions: calculateDelta(
861
+ workflowsData.totalExecutions,
862
+ previousMetrics?.workflows?.executions,
863
+ MAX_HOURLY_DELTAS.workflow_executions
864
+ ),
865
+ successes: calculateDelta(
866
+ workflowsData.totalSuccesses,
867
+ previousMetrics?.workflows?.successes,
868
+ MAX_HOURLY_DELTAS.workflow_executions
869
+ ),
870
+ failures: calculateDelta(workflowsData.totalFailures, previousMetrics?.workflows?.failures),
871
+ wallTimeMs: calculateDelta(
872
+ workflowsData.totalWallTimeMs,
873
+ previousMetrics?.workflows?.wallTimeMs
874
+ ),
875
+ cpuTimeMs: calculateDelta(
876
+ workflowsData.totalCpuTimeMs,
877
+ previousMetrics?.workflows?.cpuTimeMs
878
+ ),
879
+ },
880
+ workers: {
881
+ requests: calculateDelta(workersRequests, previousMetrics?.workers?.requests, MAX_HOURLY_DELTAS.workers_requests),
882
+ errors: calculateDelta(workersErrors, previousMetrics?.workers?.errors, MAX_HOURLY_DELTAS.workers_errors),
883
+ cpuTimeMs: calculateDelta(workersCpuTimeMs, previousMetrics?.workers?.cpuTimeMs, MAX_HOURLY_DELTAS.workers_cpu_ms),
884
+ },
885
+ d1: {
886
+ rowsRead: calculateDelta(d1RowsRead, previousMetrics?.d1?.rowsRead, MAX_HOURLY_DELTAS.d1_rows_read),
887
+ rowsWritten: calculateDelta(d1RowsWritten, previousMetrics?.d1?.rowsWritten, MAX_HOURLY_DELTAS.d1_rows_written),
888
+ },
889
+ kv: {
890
+ reads: calculateDelta(kvReads, previousMetrics?.kv?.reads, MAX_HOURLY_DELTAS.kv_reads),
891
+ writes: calculateDelta(kvWrites, previousMetrics?.kv?.writes, MAX_HOURLY_DELTAS.kv_writes),
892
+ deletes: calculateDelta(kvDeletes, previousMetrics?.kv?.deletes, MAX_HOURLY_DELTAS.kv_deletes),
893
+ lists: calculateDelta(kvLists, previousMetrics?.kv?.lists, MAX_HOURLY_DELTAS.kv_lists),
894
+ },
895
+ r2: {
896
+ classAOps: calculateDelta(r2ClassAOps, previousMetrics?.r2?.classAOps, MAX_HOURLY_DELTAS.r2_class_a),
897
+ classBOps: calculateDelta(r2ClassBOps, previousMetrics?.r2?.classBOps, MAX_HOURLY_DELTAS.r2_class_b),
898
+ egressBytes: calculateDelta(r2EgressBytes, previousMetrics?.r2?.egressBytes, MAX_HOURLY_DELTAS.r2_egress_bytes),
899
+ },
900
+ aiGateway: {
901
+ requests: calculateDelta(aiGatewayRequests, previousMetrics?.aiGateway?.requests, MAX_HOURLY_DELTAS.ai_gateway_requests),
902
+ tokensIn: calculateDelta(aiGatewayTokensIn, previousMetrics?.aiGateway?.tokensIn, MAX_HOURLY_DELTAS.ai_gateway_tokens),
903
+ tokensOut: calculateDelta(aiGatewayTokensOut, previousMetrics?.aiGateway?.tokensOut, MAX_HOURLY_DELTAS.ai_gateway_tokens),
904
+ cached: calculateDelta(aiGatewayCached, previousMetrics?.aiGateway?.cached),
905
+ },
906
+ pages: {
907
+ deployments: calculateDelta(pagesDeployments, previousMetrics?.pages?.deployments, MAX_HOURLY_DELTAS.pages_deployments),
908
+ bandwidthBytes: calculateDelta(0, previousMetrics?.pages?.bandwidthBytes),
909
+ },
910
+ };
911
+
912
+ log.info('Calculated deltas', {
913
+ doRequests: deltas.do.requests,
914
+ workersRequests: deltas.workers.requests,
915
+ d1Reads: deltas.d1.rowsRead,
916
+ });
917
+
918
+ // Calculate hourly costs using delta values with proper proration
919
+ // This fixes the issue where monthly base costs ($5/mo) were applied without proration
920
+ const hourlyUsageMetrics: HourlyUsageMetrics = {
921
+ workersRequests: deltas.workers.requests,
922
+ workersCpuMs: deltas.workers.cpuTimeMs,
923
+ d1Reads: deltas.d1.rowsRead,
924
+ d1Writes: deltas.d1.rowsWritten,
925
+ kvReads: deltas.kv.reads,
926
+ kvWrites: deltas.kv.writes,
927
+ kvDeletes: deltas.kv.deletes,
928
+ kvLists: deltas.kv.lists,
929
+ r2ClassA: deltas.r2.classAOps,
930
+ r2ClassB: deltas.r2.classBOps,
931
+ vectorizeQueries: deltas.vectorize.queries,
932
+ aiGatewayRequests: deltas.aiGateway.requests,
933
+ durableObjectsRequests: deltas.do.requests,
934
+ durableObjectsGbSeconds: deltas.do.gbSeconds,
935
+ workersAINeurons: deltas.workersAI.neurons,
936
+ queuesMessages: deltas.queues.produced + deltas.queues.consumed,
937
+ };
938
+ const hourlyCosts = calculateHourlyCosts(hourlyUsageMetrics);
939
+ log.info('Hourly prorated costs', {
940
+ workers: hourlyCosts.workers,
941
+ baseHourly: PRICING_TIERS.workers.baseCostMonthly / HOURS_PER_MONTH,
942
+ total: hourlyCosts.total,
943
+ });
944
+
945
+ // 5. Persist hourly snapshot for 'all' project (using deltas)
946
+ totalD1Writes += await persistHourlySnapshot(
947
+ env,
948
+ snapshotHour,
949
+ 'all',
950
+ usage,
951
+ hourlyCosts, // Use prorated hourly costs instead of monthly costs
952
+ samplingMode,
953
+ workflows,
954
+ aiMetrics,
955
+ queues,
956
+ deltas // Pass deltas for accurate hourly values
957
+ );
958
+
959
+ // 6. Persist per-project breakdowns
960
+ const projectUsage = calculateProjectCosts(usage);
961
+
962
+ // Build per-project cumulative values for delta calculation
963
+ const projectCumulatives: Record<
964
+ string,
965
+ {
966
+ workersRequests: number;
967
+ workersErrors: number;
968
+ workersCpuTimeMs: number;
969
+ d1RowsRead: number;
970
+ d1RowsWritten: number;
971
+ kvReads: number;
972
+ kvWrites: number;
973
+ kvDeletes: number;
974
+ kvLists: number;
975
+ r2ClassAOps: number;
976
+ r2ClassBOps: number;
977
+ doRequests: number;
978
+ doGbSeconds: number;
979
+ }
980
+ > = {};
981
+
982
+ for (const project of projectUsage) {
983
+ const projectId = project.project.toLowerCase().replace(/ /g, '-');
984
+ projectCumulatives[projectId] = {
985
+ workersRequests: project.workersRequests,
986
+ workersErrors: project.workersErrors,
987
+ workersCpuTimeMs: project.workersCpuTimeMs,
988
+ d1RowsRead: project.d1RowsRead,
989
+ d1RowsWritten: project.d1RowsWritten,
990
+ kvReads: project.kvReads,
991
+ kvWrites: project.kvWrites,
992
+ kvDeletes: project.kvDeletes,
993
+ kvLists: project.kvLists,
994
+ r2ClassAOps: project.r2ClassAOps,
995
+ r2ClassBOps: project.r2ClassBOps,
996
+ doRequests: project.doRequests,
997
+ doGbSeconds: project.doGbSeconds,
998
+ };
999
+ }
1000
+
1001
+ // Add per-project cumulative values to currentCumulativeMetrics before saving
1002
+ currentCumulativeMetrics.projects = projectCumulatives;
1003
+
1004
+ // Save current cumulative metrics (including per-project) for next hour's delta calculation
1005
+ await savePreviousHourMetrics(trackedEnv, currentCumulativeMetrics);
1006
+
1007
+ // F2 Fix: Removed per-project INSERT to eliminate double-counting
1008
+ // Per-project data is now available via:
1009
+ // - resource_usage_snapshots (SDK telemetry, per-resource granularity)
1010
+ // - Analytics Engine (real-time telemetry)
1011
+ // Account-wide totals are stored in project='all' row only
1012
+
1013
+ // Update rolling 24h DO GB-seconds counter for circuit breaker (per-project)
1014
+ // TODO: This currently queries historical per-project data; will need refactoring
1015
+ // to use resource_usage_snapshots or Analytics Engine once historical data ages out
1016
+ for (const project of projectUsage) {
1017
+ const projectNormalized = project.project.toLowerCase().replace(/ /g, '-');
1018
+ try {
1019
+ const doSum = await env.PLATFORM_DB.prepare(
1020
+ `SELECT SUM(do_gb_seconds) as total FROM hourly_usage_snapshots
1021
+ WHERE project = ? AND snapshot_hour >= datetime('now', '-24 hours')`
1022
+ )
1023
+ .bind(projectNormalized)
1024
+ .first<{ total: number | null }>();
1025
+
1026
+ const doGbSeconds24h = doSum?.total ?? 0;
1027
+ await setDOGbSecondsCount(trackedEnv, projectNormalized, doGbSeconds24h);
1028
+ log.info('Project DO GB-seconds (24h)', {
1029
+ project: projectNormalized,
1030
+ gbSeconds: doGbSeconds24h,
1031
+ });
1032
+ } catch (err) {
1033
+ log.error(`Failed to update DO GB-seconds for ${projectNormalized}`, err);
1034
+ }
1035
+ }
1036
+
1037
+ // 6.4 Persist _unattributed row for usage that couldn't be attributed to specific projects
1038
+ // This includes: unattributed Vectorize dimensions, Workers AI neurons (no per-project GraphQL)
1039
+ // and catch-all cost subtraction (account total - sum of known projects)
1040
+ const accountTotalCost = usage.workers.reduce((sum, w) => sum + w.requests * 0, 0); // Placeholder
1041
+ const attributedProjectsCost = projectUsage.reduce((sum, p) => sum + p.total, 0);
1042
+ const unattributedCostRemainder = Math.max(0, accountTotalCost - attributedProjectsCost);
1043
+
1044
+ // Only create _unattributed row if there's something to track
1045
+ const hasUnattributedVectorize = vectorizeAttribution.unattributed > 0;
1046
+ const hasUnattributedAINeurons = workersAINeuronData.totalNeurons > 0;
1047
+
1048
+ if (hasUnattributedVectorize || hasUnattributedAINeurons) {
1049
+ log.info('Unattributed usage', {
1050
+ vectorizeDimensions: vectorizeAttribution.unattributed,
1051
+ workersAINeurons: workersAINeuronData.totalNeurons,
1052
+ });
1053
+
1054
+ await env.PLATFORM_DB.prepare(
1055
+ `
1056
+ INSERT INTO hourly_usage_snapshots (
1057
+ id, snapshot_hour, project,
1058
+ vectorize_dimensions, workersai_neurons, workersai_cost_usd,
1059
+ total_cost_usd, collection_timestamp, sampling_mode
1060
+ ) VALUES (?, ?, '_unattributed', ?, ?, ?, ?, ?, ?)
1061
+ ON CONFLICT (snapshot_hour, project) DO UPDATE SET
1062
+ vectorize_dimensions = excluded.vectorize_dimensions,
1063
+ workersai_neurons = excluded.workersai_neurons,
1064
+ workersai_cost_usd = excluded.workersai_cost_usd,
1065
+ total_cost_usd = excluded.total_cost_usd,
1066
+ collection_timestamp = excluded.collection_timestamp
1067
+ `
1068
+ )
1069
+ .bind(
1070
+ generateId(),
1071
+ snapshotHour,
1072
+ vectorizeAttribution.unattributed,
1073
+ workersAINeuronData.totalNeurons,
1074
+ // Workers AI cost: $0.011 per 1000 neurons (after free tier)
1075
+ (Math.max(0, workersAINeuronData.totalNeurons - 10000) / 1000) * 0.011,
1076
+ unattributedCostRemainder,
1077
+ Math.floor(Date.now() / 1000),
1078
+ samplingModeStr
1079
+ )
1080
+ .run();
1081
+ totalD1Writes++;
1082
+ }
1083
+
1084
+ // 6.5 Persist resource-level snapshots for multi-level aggregation
1085
+ totalD1Writes += await persistResourceUsageSnapshots(
1086
+ trackedEnv,
1087
+ snapshotHour,
1088
+ usage,
1089
+ queuesData,
1090
+ workflowsData
1091
+ );
1092
+
1093
+ // 7. Collect external metrics in parallel (once daily at midnight)
1094
+ if (currentHour === 0) {
1095
+ // Collect all external providers in parallel via the collector framework
1096
+ // TODO: Register your collectors in workers/lib/usage/collectors/index.ts
1097
+ const externalMetrics = await collectExternalMetrics(trackedEnv);
1098
+ if (externalMetrics.errors.length > 0) {
1099
+ log.warn('Some external providers failed', { failedProviders: externalMetrics.errors });
1100
+ }
1101
+
1102
+ // Persist collected external metrics to D1 third_party_usage table.
1103
+ // The collector framework returns results keyed by collector name.
1104
+ // TODO: Add your own persistence logic for each registered collector.
1105
+ // See workers/lib/usage/collectors/example.ts for the collector template.
1106
+ //
1107
+ // Example:
1108
+ // const myProviderData = externalMetrics.results['my-provider'];
1109
+ // if (myProviderData) {
1110
+ // await persistThirdPartyUsage(trackedEnv, today, 'my-provider', 'metric_name', value, 'unit', cost);
1111
+ // totalD1Writes++;
1112
+ // }
1113
+
1114
+ // 7b. Collect and persist Cloudflare subscription data (once daily at midnight)
1115
+ const cfSubscriptions = await graphql.getAccountSubscriptions();
1116
+ if (cfSubscriptions) {
1117
+ // Persist each subscription
1118
+ for (const sub of cfSubscriptions.subscriptions) {
1119
+ await persistThirdPartyUsage(
1120
+ env,
1121
+ today,
1122
+ 'cloudflare',
1123
+ 'subscription',
1124
+ sub.price,
1125
+ sub.frequency,
1126
+ sub.price,
1127
+ sub.ratePlanName
1128
+ );
1129
+ totalD1Writes++;
1130
+ }
1131
+
1132
+ // Persist summary flags
1133
+ await persistThirdPartyUsage(trackedEnv, today, 'cloudflare', 'has_workers_paid', cfSubscriptions.hasWorkersPaid ? 1 : 0, 'boolean', 0);
1134
+ await persistThirdPartyUsage(trackedEnv, today, 'cloudflare', 'has_r2_paid', cfSubscriptions.hasR2Paid ? 1 : 0, 'boolean', 0);
1135
+ await persistThirdPartyUsage(trackedEnv, today, 'cloudflare', 'has_analytics_engine', cfSubscriptions.hasAnalyticsEngine ? 1 : 0, 'boolean', 0);
1136
+ await persistThirdPartyUsage(trackedEnv, today, 'cloudflare', 'monthly_base_cost', cfSubscriptions.monthlyBaseCost, 'usd', cfSubscriptions.monthlyBaseCost);
1137
+ totalD1Writes += 4;
1138
+
1139
+ // Persist plan inclusions (free tier amounts)
1140
+ const inclusions = cfSubscriptions.planInclusions;
1141
+ await persistThirdPartyUsage(trackedEnv, today, 'cloudflare', 'workers_requests_included', inclusions.requestsIncluded, 'requests', 0);
1142
+ await persistThirdPartyUsage(trackedEnv, today, 'cloudflare', 'workers_cpu_time_included', inclusions.cpuTimeIncluded, 'ms', 0);
1143
+ await persistThirdPartyUsage(trackedEnv, today, 'cloudflare', 'd1_rows_read_included', inclusions.d1RowsReadIncluded, 'rows', 0);
1144
+ await persistThirdPartyUsage(trackedEnv, today, 'cloudflare', 'd1_rows_written_included', inclusions.d1RowsWrittenIncluded, 'rows', 0);
1145
+ await persistThirdPartyUsage(trackedEnv, today, 'cloudflare', 'd1_storage_included', inclusions.d1StorageIncluded, 'bytes', 0);
1146
+ await persistThirdPartyUsage(trackedEnv, today, 'cloudflare', 'kv_reads_included', inclusions.kvReadsIncluded, 'reads', 0);
1147
+ await persistThirdPartyUsage(trackedEnv, today, 'cloudflare', 'kv_writes_included', inclusions.kvWritesIncluded, 'writes', 0);
1148
+ await persistThirdPartyUsage(trackedEnv, today, 'cloudflare', 'kv_storage_included', inclusions.kvStorageIncluded, 'bytes', 0);
1149
+ await persistThirdPartyUsage(trackedEnv, today, 'cloudflare', 'r2_class_a_included', inclusions.r2ClassAIncluded, 'operations', 0);
1150
+ await persistThirdPartyUsage(trackedEnv, today, 'cloudflare', 'r2_class_b_included', inclusions.r2ClassBIncluded, 'operations', 0);
1151
+ await persistThirdPartyUsage(trackedEnv, today, 'cloudflare', 'r2_storage_included', inclusions.r2StorageIncluded, 'bytes', 0);
1152
+ await persistThirdPartyUsage(trackedEnv, today, 'cloudflare', 'do_requests_included', inclusions.doRequestsIncluded, 'requests', 0);
1153
+ await persistThirdPartyUsage(trackedEnv, today, 'cloudflare', 'do_duration_included', inclusions.doDurationIncluded, 'gb_seconds', 0);
1154
+ await persistThirdPartyUsage(trackedEnv, today, 'cloudflare', 'do_storage_included', inclusions.doStorageIncluded, 'bytes', 0);
1155
+ await persistThirdPartyUsage(trackedEnv, today, 'cloudflare', 'vectorize_queried_dimensions_included', inclusions.vectorizeQueriedDimensionsIncluded, 'dimensions', 0);
1156
+ await persistThirdPartyUsage(trackedEnv, today, 'cloudflare', 'vectorize_stored_dimensions_included', inclusions.vectorizeStoredDimensionsIncluded, 'dimensions', 0);
1157
+ await persistThirdPartyUsage(trackedEnv, today, 'cloudflare', 'queues_operations_included', inclusions.queuesOperationsIncluded, 'operations', 0);
1158
+ totalD1Writes += 17;
1159
+
1160
+ log.info('Collected subscriptions', {
1161
+ plansCount: cfSubscriptions.subscriptions.length,
1162
+ hasWorkersPaid: cfSubscriptions.hasWorkersPaid,
1163
+ monthlyBaseCost: cfSubscriptions.monthlyBaseCost,
1164
+ });
1165
+ }
1166
+
1167
+ log.info('Completed third-party provider collection');
1168
+ }
1169
+
1170
+ // 7.5 Send hourly P1 error digest (runs every hour)
1171
+ // Aggregates errors from the last hour and sends digest if thresholds exceeded
1172
+ try {
1173
+ await sendHourlyErrorDigest(trackedEnv);
1174
+ log.info('Hourly P1 error digest check complete');
1175
+ } catch (error) {
1176
+ log.error('Failed to send hourly error digest', error);
1177
+ }
1178
+
1179
+ // 7.6 Hourly D1 write anomaly detection (catches spikes within hours, not days)
1180
+ try {
1181
+ const hourlyAnomalies = await detectHourlyD1WriteAnomalies(trackedEnv);
1182
+ if (hourlyAnomalies > 0) {
1183
+ log.info('Hourly D1 write anomaly detected', { hourlyAnomalies });
1184
+ totalD1Writes += hourlyAnomalies; // Each anomaly = 1 D1 write (recordAnomaly)
1185
+ }
1186
+ } catch (error) {
1187
+ log.error('Failed hourly D1 write anomaly check', error);
1188
+ }
1189
+
1190
+ // 8. Run daily rollup at midnight for yesterday
1191
+ if (currentHour === 0) {
1192
+ const yesterday = new Date();
1193
+ yesterday.setUTCDate(yesterday.getUTCDate() - 1);
1194
+ const yesterdayStr = yesterday.toISOString().split('T')[0];
1195
+ totalD1Writes += await runDailyRollup(trackedEnv, yesterdayStr);
1196
+
1197
+ // Run AI model breakdown daily rollups
1198
+ totalD1Writes += await runWorkersAIModelDailyRollup(trackedEnv, yesterdayStr);
1199
+ totalD1Writes += await runAIGatewayModelDailyRollup(trackedEnv, yesterdayStr);
1200
+ log.info('Completed AI model breakdown daily rollups', { date: yesterdayStr });
1201
+
1202
+ // Run feature-level usage rollup from Analytics Engine (SDK telemetry)
1203
+ // This aggregates D1, KV, AI, Vectorize metrics captured by Platform SDK
1204
+ totalD1Writes += await runFeatureUsageDailyRollup(trackedEnv, yesterdayStr);
1205
+ log.info('Completed feature usage daily rollup', { date: yesterdayStr });
1206
+
1207
+ // Self-healing: Fix any gaps in daily rollups from previous issues
1208
+ const gapsFilled = await backfillMissingDays(trackedEnv);
1209
+ if (gapsFilled > 0) {
1210
+ log.info('Gap-fill fixed days with missing data', { daysFilled: gapsFilled });
1211
+ totalD1Writes += gapsFilled; // Each day = 1 rollup write (approx)
1212
+ }
1213
+
1214
+ // Calculate usage vs allowance percentages (after rollups complete)
1215
+ totalD1Writes += await calculateUsageVsAllowancePercentages(trackedEnv, today);
1216
+
1217
+ // Invalidate daily cache to ensure fresh data is served after rollups
1218
+ await invalidateDailyCache(trackedEnv);
1219
+
1220
+ // Run monthly rollup on 1st of month for previous month
1221
+ if (new Date().getUTCDate() === 1) {
1222
+ const lastMonth = new Date();
1223
+ lastMonth.setUTCMonth(lastMonth.getUTCMonth() - 1);
1224
+ const lastMonthStr = lastMonth.toISOString().slice(0, 7);
1225
+ totalD1Writes += await runMonthlyRollup(trackedEnv, lastMonthStr);
1226
+ }
1227
+
1228
+ // Cleanup old data
1229
+ const cleanup = await cleanupOldData(trackedEnv);
1230
+ totalD1Writes += cleanup.hourlyDeleted + cleanup.dailyDeleted;
1231
+
1232
+ // Run anomaly detection at midnight
1233
+ const anomalies = await detectAnomalies(trackedEnv);
1234
+ if (anomalies > 0) {
1235
+ log.info('Detected anomalies', { anomalies });
1236
+ totalD1Writes += anomalies; // Each anomaly = 1 D1 write
1237
+ }
1238
+
1239
+ // Check monthly budget limits (sums daily_usage_rollups for current month)
1240
+ try {
1241
+ const monthlyViolations = await checkMonthlyBudgets(trackedEnv);
1242
+ if (monthlyViolations > 0) {
1243
+ log.info('Monthly budget violations detected', { monthlyViolations });
1244
+ }
1245
+ } catch (error) {
1246
+ log.error('Failed monthly budget check', error);
1247
+ }
1248
+
1249
+ // Run dataset registry discovery weekly (Sunday at midnight UTC)
1250
+ const dayOfWeek = new Date().getUTCDay();
1251
+ if (dayOfWeek === 0) {
1252
+ const registryResult = await discoverAndUpdateDatasetRegistry(trackedEnv);
1253
+ totalD1Writes += registryResult.d1Writes;
1254
+ log.info('Dataset registry updated', {
1255
+ datasetsChecked: registryResult.datasetsChecked,
1256
+ newBillableAlerts: registryResult.newBillableAlerts,
1257
+ });
1258
+ }
1259
+
1260
+ // Send daily P2 error summary (runs at midnight UTC)
1261
+ try {
1262
+ await sendDailyErrorSummary(trackedEnv);
1263
+ log.info('Daily P2 error summary sent');
1264
+ } catch (error) {
1265
+ log.error('Failed to send daily error summary', error);
1266
+ }
1267
+
1268
+ // Cleanup old error events (7-day retention)
1269
+ try {
1270
+ const errorEventsDeleted = await cleanupOldErrorEvents(trackedEnv);
1271
+ if (errorEventsDeleted > 0) {
1272
+ totalD1Writes += 1;
1273
+ log.info('Cleaned up old error events', { deleted: errorEventsDeleted });
1274
+ }
1275
+ } catch (error) {
1276
+ log.error('Failed to cleanup error events', error);
1277
+ }
1278
+ }
1279
+
1280
+ // 9. Update D1 write counter
1281
+ await incrementD1WriteCount(trackedEnv, totalD1Writes);
1282
+
1283
+ // 10. Check and trip circuit breakers if needed
1284
+ await checkAndTripCircuitBreakers(trackedEnv);
1285
+
1286
+ // 11. Send Platform SDK heartbeat
1287
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
1288
+ await health(HEARTBEAT_HEALTH, env.PLATFORM_CACHE as any, env.PLATFORM_TELEMETRY, ctx);
1289
+ log.debug('Heartbeat sent');
1290
+
1291
+ // Signal success to Gatus heartbeat
1292
+ pingHeartbeat(ctx, env.GATUS_HEARTBEAT_URL, env.GATUS_TOKEN, true);
1293
+
1294
+ const duration = Date.now() - startTime;
1295
+ log.info('Collection complete', { durationMs: duration, d1Writes: totalD1Writes });
1296
+ } catch (error) {
1297
+ const errorMessage = error instanceof Error ? error.message : String(error);
1298
+ log.error('Error during collection', undefined, { errorMessage });
1299
+
1300
+ // Signal failure to Gatus heartbeat
1301
+ pingHeartbeat(ctx, env.GATUS_HEARTBEAT_URL, env.GATUS_TOKEN, false);
1302
+
1303
+ // Send alert on failure
1304
+ if (env.SLACK_WEBHOOK_URL) {
1305
+ await sendSlackAlert(trackedEnv, {
1306
+ text: ':warning: Platform Usage Collection Failed',
1307
+ attachments: [
1308
+ {
1309
+ color: 'warning',
1310
+ fields: [
1311
+ { title: 'Error', value: errorMessage, short: false },
1312
+ { title: 'Hour', value: snapshotHour, short: true },
1313
+ { title: 'Sampling Mode', value: SamplingMode[samplingMode], short: true },
1314
+ ],
1315
+ },
1316
+ ],
1317
+ });
1318
+ }
1319
+ }
1320
+ }
1321
+ // =============================================================================
1322
+ // WORKER EXPORT
1323
+ // =============================================================================
1324
+
1325
+ export default {
1326
+ // Queue consumer - processes SDK telemetry messages and DLQ
1327
+ async queue(batch: MessageBatch<TelemetryMessage>, env: Env): Promise<void> {
1328
+ // Dispatch to appropriate handler based on queue name
1329
+ if (batch.queue === 'platform-telemetry-dlq') {
1330
+ await handleDLQ(batch, env);
1331
+ } else {
1332
+ await handleQueue(batch, env);
1333
+ }
1334
+ },
1335
+
1336
+ // Scheduled handler - runs daily at midnight UTC
1337
+ async scheduled(event: ScheduledEvent, env: Env, ctx: ExecutionContext): Promise<void> {
1338
+ await handleScheduled(event, env, ctx);
1339
+ },
1340
+
1341
+ // HTTP handler - API endpoints
1342
+ async fetch(request: Request, env: Env, ctx: ExecutionContext): Promise<Response> {
1343
+ const url = new URL(request.url);
1344
+ const path = url.pathname;
1345
+
1346
+ // CORS headers
1347
+ const corsHeaders = {
1348
+ 'Access-Control-Allow-Origin': '*',
1349
+ 'Access-Control-Allow-Methods': 'GET, POST, PUT, OPTIONS',
1350
+ 'Access-Control-Allow-Headers': 'Content-Type',
1351
+ };
1352
+
1353
+ // Handle CORS preflight
1354
+ if (request.method === 'OPTIONS') {
1355
+ return new Response(null, { headers: corsHeaders });
1356
+ }
1357
+
1358
+ // Test error endpoint - triggers an intentional error for testing the error collection pipeline
1359
+ // Usage: GET /test-error?type=exception|soft|warning
1360
+ if (path === '/test-error') {
1361
+ const errorType = url.searchParams.get('type') || 'exception';
1362
+
1363
+ if (errorType === 'soft') {
1364
+ console.error(
1365
+ 'TEST SOFT ERROR: This is a test soft error from platform-usage /test-error endpoint'
1366
+ );
1367
+ return new Response(JSON.stringify({ triggered: 'soft_error', worker: 'platform-usage' }), {
1368
+ headers: { ...corsHeaders, 'Content-Type': 'application/json' },
1369
+ });
1370
+ }
1371
+
1372
+ if (errorType === 'warning') {
1373
+ console.warn(
1374
+ 'TEST WARNING: This is a test warning from platform-usage /test-error endpoint'
1375
+ );
1376
+ return new Response(JSON.stringify({ triggered: 'warning', worker: 'platform-usage' }), {
1377
+ headers: { ...corsHeaders, 'Content-Type': 'application/json' },
1378
+ });
1379
+ }
1380
+
1381
+ // Default: throw an exception
1382
+ throw new Error(
1383
+ 'TEST EXCEPTION: This is a test exception from platform-usage /test-error endpoint'
1384
+ );
1385
+ }
1386
+
1387
+ // Create trace context and logger for request tracking
1388
+ const traceContext = createTraceContext(request, env);
1389
+ const log = createLoggerFromRequest(request, env, 'platform-usage', 'platform:usage:api');
1390
+
1391
+ log.info('Request received', {
1392
+ method: request.method,
1393
+ path,
1394
+ traceId: traceContext.traceId,
1395
+ spanId: traceContext.spanId,
1396
+ });
1397
+
1398
+ try {
1399
+ // Wrap env with Platform SDK for automatic metric tracking
1400
+ // Note: platform:usage:api tracks this worker's own API usage
1401
+ // The SDK will track all D1/KV/AI operations and report via PLATFORM_TELEMETRY queue
1402
+
1403
+ const trackedEnv = withFeatureBudget(env, 'platform:usage:api', {
1404
+ ctx,
1405
+ cacheKv: env.PLATFORM_CACHE as any, // Type assertion for KVNamespace compatibility
1406
+ telemetryQueue: env.PLATFORM_TELEMETRY,
1407
+ checkCircuitBreaker: false, // Don't block API requests - this is the control plane
1408
+ });
1409
+
1410
+ // Handle settings verify endpoint (GET only)
1411
+ // Returns all settings from D1 and validates completeness
1412
+ if (path === '/usage/settings/verify' || path === '/api/usage/settings/verify') {
1413
+ if (request.method !== 'GET') {
1414
+ const response = jsonResponse({ error: 'Method not allowed' }, 405);
1415
+ const headers = new Headers(response.headers);
1416
+ Object.entries(corsHeaders).forEach(([key, value]) => headers.set(key, value));
1417
+ return new Response(response.body, { status: response.status, headers });
1418
+ }
1419
+ const response = await handleSettingsVerify(trackedEnv);
1420
+ const headers = new Headers(response.headers);
1421
+ Object.entries(corsHeaders).forEach(([key, value]) => headers.set(key, value));
1422
+ return new Response(response.body, { status: response.status, headers });
1423
+ }
1424
+
1425
+ // Handle settings endpoint (supports GET and PUT)
1426
+ // Support both direct paths (/usage/settings) and proxied paths (/api/usage/settings)
1427
+ if (path === '/usage/settings' || path === '/api/usage/settings') {
1428
+ let response: Response;
1429
+ if (request.method === 'GET') {
1430
+ response = await handleGetSettings(trackedEnv);
1431
+ } else if (request.method === 'PUT') {
1432
+ response = await handlePutSettings(request, trackedEnv);
1433
+ } else {
1434
+ response = jsonResponse({ error: 'Method not allowed' }, 405);
1435
+ }
1436
+ // Add CORS headers to response
1437
+ const headers = new Headers(response.headers);
1438
+ Object.entries(corsHeaders).forEach(([key, value]) => headers.set(key, value));
1439
+ return new Response(response.body, { status: response.status, headers });
1440
+ }
1441
+
1442
+ // Handle manual trigger for testing (POST)
1443
+ // Supports ?forceHour=0 to test midnight-only functions
1444
+ if (
1445
+ (path === '/usage/trigger' || path === '/api/usage/trigger') &&
1446
+ request.method === 'POST'
1447
+ ) {
1448
+ const forceHour = url.searchParams.get('forceHour');
1449
+ let scheduledTime = Date.now();
1450
+
1451
+ // If forceHour is specified, create a fake time at that hour
1452
+ const log = createLoggerFromEnv(env, 'platform-usage', 'platform:usage:trigger');
1453
+ if (forceHour !== null) {
1454
+ const hour = parseInt(forceHour, 10);
1455
+ if (!isNaN(hour) && hour >= 0 && hour <= 23) {
1456
+ const fakeDate = new Date();
1457
+ fakeDate.setUTCHours(hour, 0, 0, 0);
1458
+ scheduledTime = fakeDate.getTime();
1459
+ log.info('Manual trigger requested with forceHour - running synchronously', {
1460
+ forceHour: hour,
1461
+ });
1462
+ } else {
1463
+ log.info('Manual trigger requested - running synchronously');
1464
+ }
1465
+ } else {
1466
+ log.info('Manual trigger requested - running synchronously');
1467
+ }
1468
+
1469
+ // Create a fake scheduled event for testing
1470
+ const fakeEvent = {
1471
+ scheduledTime,
1472
+ cron: '0 * * * *',
1473
+ } as ScheduledEvent;
1474
+ const ctx = {
1475
+ waitUntil: (promise: Promise<unknown>) =>
1476
+ promise.catch((err) => log.error('waitUntil error', err)),
1477
+ passThroughOnException: () => {},
1478
+ props: {},
1479
+ } as unknown as ExecutionContext;
1480
+ try {
1481
+ // Run synchronously to see any errors
1482
+ await handleScheduled(fakeEvent, env, ctx);
1483
+ const response = jsonResponse({ success: true, message: 'Scheduled handler completed' });
1484
+ const headers = new Headers(response.headers);
1485
+ Object.entries(corsHeaders).forEach(([key, value]) => headers.set(key, value));
1486
+ return new Response(response.body, { status: response.status, headers });
1487
+ } catch (error) {
1488
+ log.error('Error', error);
1489
+ const response = jsonResponse({ success: false, error: String(error) }, 500);
1490
+ const headers = new Headers(response.headers);
1491
+ Object.entries(corsHeaders).forEach(([key, value]) => headers.set(key, value));
1492
+ return new Response(response.body, { status: response.status, headers });
1493
+ }
1494
+ }
1495
+
1496
+ // Handle direct daily rollup for a specific date (POST)
1497
+ // Supports ?date=YYYY-MM-DD to re-rollup a specific day from hourly data
1498
+ if ((path === '/usage/rollup' || path === '/api/usage/rollup') && request.method === 'POST') {
1499
+ const dateParam = url.searchParams.get('date');
1500
+ if (!dateParam) {
1501
+ return jsonResponse({ error: 'Missing required param: date (YYYY-MM-DD)' }, 400);
1502
+ }
1503
+ // Validate date format
1504
+ const dateRegex = /^\d{4}-\d{2}-\d{2}$/;
1505
+ if (!dateRegex.test(dateParam)) {
1506
+ return jsonResponse({ error: 'Invalid date format. Use YYYY-MM-DD.' }, 400);
1507
+ }
1508
+ const rollupLog = createLoggerFromEnv(env, 'platform-usage', 'platform:usage:rollup');
1509
+ rollupLog.info('Manual rollup requested', { date: dateParam });
1510
+ try {
1511
+ const changes = await runDailyRollup(trackedEnv, dateParam);
1512
+ const response = jsonResponse({
1513
+ success: true,
1514
+ date: dateParam,
1515
+ changes,
1516
+ message: `Daily rollup completed for ${dateParam}`,
1517
+ });
1518
+ const headers = new Headers(response.headers);
1519
+ Object.entries(corsHeaders).forEach(([key, value]) => headers.set(key, value));
1520
+ return new Response(response.body, { status: response.status, headers });
1521
+ } catch (error) {
1522
+ rollupLog.error('Error', error);
1523
+ const response = jsonResponse({ success: false, error: String(error) }, 500);
1524
+ const headers = new Headers(response.headers);
1525
+ Object.entries(corsHeaders).forEach(([key, value]) => headers.set(key, value));
1526
+ return new Response(response.body, { status: response.status, headers });
1527
+ }
1528
+ }
1529
+
1530
+ // Handle circuit breaker reset (POST)
1531
+ if (
1532
+ (path === '/usage/reset-circuit-breaker' || path === '/api/usage/reset-circuit-breaker') &&
1533
+ request.method === 'POST'
1534
+ ) {
1535
+ const response = await handleResetCircuitBreaker(request, trackedEnv);
1536
+ const headers = new Headers(response.headers);
1537
+ Object.entries(corsHeaders).forEach(([key, value]) => headers.set(key, value));
1538
+ return new Response(response.body, { status: response.status, headers });
1539
+ }
1540
+
1541
+ // Handle circuit breaker status (GET)
1542
+ if (
1543
+ (path === '/usage/circuit-breaker-status' ||
1544
+ path === '/api/usage/circuit-breaker-status') &&
1545
+ request.method === 'GET'
1546
+ ) {
1547
+ const response = await handleCircuitBreakerStatus(trackedEnv);
1548
+ const headers = new Headers(response.headers);
1549
+ Object.entries(corsHeaders).forEach(([key, value]) => headers.set(key, value));
1550
+ return new Response(response.body, { status: response.status, headers });
1551
+ }
1552
+
1553
+ // Handle live usage endpoint (GET) - real-time KV data with API key auth
1554
+ if ((path === '/usage/live' || path === '/api/usage/live') && request.method === 'GET') {
1555
+ const response = await handleLiveUsage(request, trackedEnv);
1556
+ const headers = new Headers(response.headers);
1557
+ Object.entries(corsHeaders).forEach(([key, value]) => headers.set(key, value));
1558
+ return new Response(response.body, { status: response.status, headers });
1559
+ }
1560
+
1561
+ // Handle backfill endpoint (POST) - task-27.3
1562
+ if (
1563
+ (path === '/usage/backfill' || path === '/api/usage/backfill') &&
1564
+ request.method === 'POST'
1565
+ ) {
1566
+ const response = await handleBackfill(request, trackedEnv);
1567
+ const headers = new Headers(response.headers);
1568
+ Object.entries(corsHeaders).forEach(([key, value]) => headers.set(key, value));
1569
+ return new Response(response.body, { status: response.status, headers });
1570
+ }
1571
+
1572
+ // ==========================================================================
1573
+ // GAP DETECTION ENDPOINTS
1574
+ // ==========================================================================
1575
+
1576
+ // GET /usage/gaps - Current gap status
1577
+ if ((path === '/usage/gaps' || path === '/api/usage/gaps') && request.method === 'GET') {
1578
+ const response = await handleGapsStatus(trackedEnv);
1579
+ const headers = new Headers(response.headers);
1580
+ Object.entries(corsHeaders).forEach(([key, value]) => headers.set(key, value));
1581
+ return new Response(response.body, { status: response.status, headers });
1582
+ }
1583
+
1584
+ // GET /usage/gaps/history - Gap detection history
1585
+ if (
1586
+ (path === '/usage/gaps/history' || path === '/api/usage/gaps/history') &&
1587
+ request.method === 'GET'
1588
+ ) {
1589
+ const response = await handleGapsHistory(trackedEnv, url);
1590
+ const headers = new Headers(response.headers);
1591
+ Object.entries(corsHeaders).forEach(([key, value]) => headers.set(key, value));
1592
+ return new Response(response.body, { status: response.status, headers });
1593
+ }
1594
+
1595
+ // POST /usage/gaps/backfill - Trigger backfill for date range
1596
+ if (
1597
+ (path === '/usage/gaps/backfill' || path === '/api/usage/gaps/backfill') &&
1598
+ request.method === 'POST'
1599
+ ) {
1600
+ const response = await handleGapsBackfill(request, trackedEnv);
1601
+ const headers = new Headers(response.headers);
1602
+ Object.entries(corsHeaders).forEach(([key, value]) => headers.set(key, value));
1603
+ return new Response(response.body, { status: response.status, headers });
1604
+ }
1605
+
1606
+ // GET /usage/gaps/backfill/history - Backfill history
1607
+ if (
1608
+ (path === '/usage/gaps/backfill/history' || path === '/api/usage/gaps/backfill/history') &&
1609
+ request.method === 'GET'
1610
+ ) {
1611
+ const response = await handleBackfillHistory(trackedEnv, url);
1612
+ const headers = new Headers(response.headers);
1613
+ Object.entries(corsHeaders).forEach(([key, value]) => headers.set(key, value));
1614
+ return new Response(response.body, { status: response.status, headers });
1615
+ }
1616
+
1617
+ // GET /usage/gaps/projects - Per-project health status
1618
+ if (
1619
+ (path === '/usage/gaps/projects' || path === '/api/usage/gaps/projects') &&
1620
+ request.method === 'GET'
1621
+ ) {
1622
+ const response = await handleProjectsHealth(trackedEnv);
1623
+ const headers = new Headers(response.headers);
1624
+ Object.entries(corsHeaders).forEach(([key, value]) => headers.set(key, value));
1625
+ return new Response(response.body, { status: response.status, headers });
1626
+ }
1627
+
1628
+ // ==========================================================================
1629
+ // AUDIT ENDPOINTS (Phase 2 Usage Capture Audit)
1630
+ // ==========================================================================
1631
+
1632
+ // GET /usage/audit - Latest comprehensive audit report
1633
+ if ((path === '/usage/audit' || path === '/api/usage/audit') && request.method === 'GET') {
1634
+ const response = await handleGetAudit(trackedEnv);
1635
+ const headers = new Headers(response.headers);
1636
+ Object.entries(corsHeaders).forEach(([key, value]) => headers.set(key, value));
1637
+ return new Response(response.body, { status: response.status, headers });
1638
+ }
1639
+
1640
+ // GET /usage/audit/history - Comprehensive audit history
1641
+ if (
1642
+ (path === '/usage/audit/history' || path === '/api/usage/audit/history') &&
1643
+ request.method === 'GET'
1644
+ ) {
1645
+ const response = await handleGetAuditHistory(request, trackedEnv);
1646
+ const headers = new Headers(response.headers);
1647
+ Object.entries(corsHeaders).forEach(([key, value]) => headers.set(key, value));
1648
+ return new Response(response.body, { status: response.status, headers });
1649
+ }
1650
+
1651
+ // GET /usage/audit/attribution - Latest attribution report
1652
+ if (
1653
+ (path === '/usage/audit/attribution' || path === '/api/usage/audit/attribution') &&
1654
+ request.method === 'GET'
1655
+ ) {
1656
+ const response = await handleGetAttribution(trackedEnv);
1657
+ const headers = new Headers(response.headers);
1658
+ Object.entries(corsHeaders).forEach(([key, value]) => headers.set(key, value));
1659
+ return new Response(response.body, { status: response.status, headers });
1660
+ }
1661
+
1662
+ // GET /usage/audit/features - Latest feature coverage report
1663
+ if (
1664
+ (path === '/usage/audit/features' || path === '/api/usage/audit/features') &&
1665
+ request.method === 'GET'
1666
+ ) {
1667
+ const response = await handleGetFeatureCoverage(request, trackedEnv);
1668
+ const headers = new Headers(response.headers);
1669
+ Object.entries(corsHeaders).forEach(([key, value]) => headers.set(key, value));
1670
+ return new Response(response.body, { status: response.status, headers });
1671
+ }
1672
+
1673
+ // ==========================================================================
1674
+ // BEHAVIORAL ANALYSIS ENDPOINTS
1675
+ // ==========================================================================
1676
+
1677
+ // GET /usage/audit/behavioral - Combined hotspots + regressions summary
1678
+ if (
1679
+ (path === '/usage/audit/behavioral' || path === '/api/usage/audit/behavioral') &&
1680
+ request.method === 'GET'
1681
+ ) {
1682
+ const response = await handleGetBehavioral(request, trackedEnv);
1683
+ const headers = new Headers(response.headers);
1684
+ Object.entries(corsHeaders).forEach(([key, value]) => headers.set(key, value));
1685
+ return new Response(response.body, { status: response.status, headers });
1686
+ }
1687
+
1688
+ // GET /usage/audit/behavioral/hotspots - File hotspots with risk scoring
1689
+ if (
1690
+ (path === '/usage/audit/behavioral/hotspots' || path === '/api/usage/audit/behavioral/hotspots') &&
1691
+ request.method === 'GET'
1692
+ ) {
1693
+ const response = await handleGetHotspots(request, trackedEnv);
1694
+ const headers = new Headers(response.headers);
1695
+ Object.entries(corsHeaders).forEach(([key, value]) => headers.set(key, value));
1696
+ return new Response(response.body, { status: response.status, headers });
1697
+ }
1698
+
1699
+ // GET /usage/audit/behavioral/regressions - SDK regressions
1700
+ if (
1701
+ (path === '/usage/audit/behavioral/regressions' || path === '/api/usage/audit/behavioral/regressions') &&
1702
+ request.method === 'GET'
1703
+ ) {
1704
+ const response = await handleGetRegressions(request, trackedEnv);
1705
+ const headers = new Headers(response.headers);
1706
+ Object.entries(corsHeaders).forEach(([key, value]) => headers.set(key, value));
1707
+ return new Response(response.body, { status: response.status, headers });
1708
+ }
1709
+
1710
+ // POST /usage/audit/behavioral/regressions/:id/acknowledge - Acknowledge regression
1711
+ const acknowledgeMatch = path.match(/^\/(?:api\/)?usage\/audit\/behavioral\/regressions\/(\d+)\/acknowledge$/);
1712
+ if (acknowledgeMatch && request.method === 'POST') {
1713
+ const response = await handleAcknowledgeRegression(request, trackedEnv, acknowledgeMatch[1]);
1714
+ const headers = new Headers(response.headers);
1715
+ Object.entries(corsHeaders).forEach(([key, value]) => headers.set(key, value));
1716
+ return new Response(response.body, { status: response.status, headers });
1717
+ }
1718
+
1719
+ // ==========================================================================
1720
+ // DLQ ADMIN ENDPOINTS
1721
+ // ==========================================================================
1722
+
1723
+ // GET /admin/dlq - List DLQ messages
1724
+ if ((path === '/admin/dlq' || path === '/api/admin/dlq') && request.method === 'GET') {
1725
+ const response = await handleListDLQ(url, trackedEnv);
1726
+ const headers = new Headers(response.headers);
1727
+ Object.entries(corsHeaders).forEach(([key, value]) => headers.set(key, value));
1728
+ return new Response(response.body, { status: response.status, headers });
1729
+ }
1730
+
1731
+ // GET /admin/dlq/stats - Get DLQ statistics
1732
+ if (
1733
+ (path === '/admin/dlq/stats' || path === '/api/admin/dlq/stats') &&
1734
+ request.method === 'GET'
1735
+ ) {
1736
+ const response = await handleDLQStats(trackedEnv);
1737
+ const headers = new Headers(response.headers);
1738
+ Object.entries(corsHeaders).forEach(([key, value]) => headers.set(key, value));
1739
+ return new Response(response.body, { status: response.status, headers });
1740
+ }
1741
+
1742
+ // POST /admin/dlq/replay-all - Replay all pending DLQ messages
1743
+ if (
1744
+ (path === '/admin/dlq/replay-all' || path === '/api/admin/dlq/replay-all') &&
1745
+ request.method === 'POST'
1746
+ ) {
1747
+ const response = await handleReplayAllDLQ(url, trackedEnv);
1748
+ const headers = new Headers(response.headers);
1749
+ Object.entries(corsHeaders).forEach(([key, value]) => headers.set(key, value));
1750
+ return new Response(response.body, { status: response.status, headers });
1751
+ }
1752
+
1753
+ // POST /admin/dlq/:id/replay - Replay a specific DLQ message
1754
+ const replayMatch = path.match(/^\/(?:api\/)?admin\/dlq\/([^/]+)\/replay$/);
1755
+ if (replayMatch && request.method === 'POST') {
1756
+ const messageId = replayMatch[1];
1757
+ const response = await handleReplayDLQ(messageId, trackedEnv);
1758
+ const headers = new Headers(response.headers);
1759
+ Object.entries(corsHeaders).forEach(([key, value]) => headers.set(key, value));
1760
+ return new Response(response.body, { status: response.status, headers });
1761
+ }
1762
+
1763
+ // POST /admin/dlq/:id/discard - Discard a specific DLQ message
1764
+ const discardMatch = path.match(/^\/(?:api\/)?admin\/dlq\/([^/]+)\/discard$/);
1765
+ if (discardMatch && request.method === 'POST') {
1766
+ const messageId = discardMatch[1];
1767
+ const body = await request.json().catch(() => ({}));
1768
+ const reason = (body as { reason?: string }).reason || 'Manually discarded';
1769
+ const response = await handleDiscardDLQ(messageId, reason, trackedEnv);
1770
+ const headers = new Headers(response.headers);
1771
+ Object.entries(corsHeaders).forEach(([key, value]) => headers.set(key, value));
1772
+ return new Response(response.body, { status: response.status, headers });
1773
+ }
1774
+
1775
+ // Handle feature circuit breakers (GET/PUT) - Phase 4
1776
+ if (
1777
+ path === '/usage/features/circuit-breakers' ||
1778
+ path === '/api/usage/features/circuit-breakers'
1779
+ ) {
1780
+ let response: Response;
1781
+ if (request.method === 'GET') {
1782
+ response = await handleGetFeatureCircuitBreakers(trackedEnv);
1783
+ } else if (request.method === 'PUT') {
1784
+ response = await handlePutFeatureCircuitBreakers(request, trackedEnv);
1785
+ } else {
1786
+ response = jsonResponse({ error: 'Method not allowed' }, 405);
1787
+ }
1788
+ const headers = new Headers(response.headers);
1789
+ Object.entries(corsHeaders).forEach(([key, value]) => headers.set(key, value));
1790
+ return new Response(response.body, { status: response.status, headers });
1791
+ }
1792
+
1793
+ // Handle feature budgets (GET/PUT) - Phase 4
1794
+ if (path === '/usage/features/budgets' || path === '/api/usage/features/budgets') {
1795
+ let response: Response;
1796
+ if (request.method === 'GET') {
1797
+ response = await handleGetFeatureBudgets(trackedEnv);
1798
+ } else if (request.method === 'PUT') {
1799
+ response = await handlePutFeatureBudgets(request, trackedEnv);
1800
+ } else {
1801
+ response = jsonResponse({ error: 'Method not allowed' }, 405);
1802
+ }
1803
+ const headers = new Headers(response.headers);
1804
+ Object.entries(corsHeaders).forEach(([key, value]) => headers.set(key, value));
1805
+ return new Response(response.body, { status: response.status, headers });
1806
+ }
1807
+
1808
+ // Handle feature history (GET only) - Phase 5.2
1809
+ if (path === '/usage/features/history' || path === '/api/usage/features/history') {
1810
+ if (request.method !== 'GET') {
1811
+ return jsonResponse({ error: 'Method not allowed' }, 405);
1812
+ }
1813
+ const response = await handleGetFeatureHistory(url, trackedEnv);
1814
+ const headers = new Headers(response.headers);
1815
+ Object.entries(corsHeaders).forEach(([key, value]) => headers.set(key, value));
1816
+ return new Response(response.body, { status: response.status, headers });
1817
+ }
1818
+
1819
+ // Handle circuit breaker events (GET only) - Phase 5.3
1820
+ if (
1821
+ path === '/usage/features/circuit-breaker-events' ||
1822
+ path === '/api/usage/features/circuit-breaker-events'
1823
+ ) {
1824
+ if (request.method !== 'GET') {
1825
+ return jsonResponse({ error: 'Method not allowed' }, 405);
1826
+ }
1827
+ const response = await handleGetCircuitBreakerEvents(url, trackedEnv);
1828
+ const headers = new Headers(response.headers);
1829
+ Object.entries(corsHeaders).forEach(([key, value]) => headers.set(key, value));
1830
+ return new Response(response.body, { status: response.status, headers });
1831
+ }
1832
+
1833
+ // Only handle GET requests for other endpoints
1834
+ if (request.method !== 'GET') {
1835
+ return jsonResponse({ error: 'Method not allowed' }, 405);
1836
+ }
1837
+
1838
+ // Route to appropriate handler
1839
+ let response: Response;
1840
+
1841
+ // Support both direct paths (/usage) and proxied paths (/api/usage)
1842
+ if (path === '/usage' || path === '/' || path === '/api/usage') {
1843
+ response = await handleUsage(url, trackedEnv);
1844
+ } else if (path === '/usage/costs' || path === '/api/usage/costs') {
1845
+ response = await handleCosts(url, trackedEnv);
1846
+ } else if (path === '/usage/thresholds' || path === '/api/usage/thresholds') {
1847
+ response = await handleThresholds(url, trackedEnv);
1848
+ } else if (path === '/usage/enhanced' || path === '/api/usage/enhanced') {
1849
+ response = await handleEnhanced(url, trackedEnv);
1850
+ } else if (path === '/usage/compare' || path === '/api/usage/compare') {
1851
+ response = await handleCompare(url, trackedEnv);
1852
+ } else if (path === '/usage/workersai' || path === '/api/usage/workersai') {
1853
+ response = await handleWorkersAI(url, trackedEnv);
1854
+ } else if (path === '/usage/daily' || path === '/api/usage/daily') {
1855
+ response = await handleDaily(url, trackedEnv);
1856
+ } else if (path === '/usage/utilization' || path === '/api/usage/utilization') {
1857
+ response = await handleUtilization(url, trackedEnv);
1858
+ } else if (path === '/usage/status' || path === '/api/usage/status') {
1859
+ response = await handleStatus(url, trackedEnv);
1860
+ } else if (path === '/usage/projects' || path === '/api/usage/projects') {
1861
+ response = await handleProjects(trackedEnv);
1862
+ } else if (path === '/usage/anomalies' || path === '/api/usage/anomalies') {
1863
+ response = await handleAnomalies(url, trackedEnv);
1864
+ } else if (path === '/usage/features' || path === '/api/usage/features') {
1865
+ response = await handleFeatures(url, trackedEnv);
1866
+ } else if (path === '/usage/query' || path === '/api/usage/query') {
1867
+ response = await handleUsageQuery(url, trackedEnv);
1868
+ } else if (path === '/usage/health-trends' || path === '/api/usage/health-trends') {
1869
+ // Phase 2 AI Judge: Health trends for dashboard
1870
+ response = await handleGetHealthTrends(url, trackedEnv);
1871
+ } else if (
1872
+ path === '/usage/health-trends/latest' ||
1873
+ path === '/api/usage/health-trends/latest'
1874
+ ) {
1875
+ // Phase 2 AI Judge: Latest health scores summary
1876
+ response = await handleGetLatestHealthTrends(trackedEnv);
1877
+ } else {
1878
+ response = jsonResponse({ error: 'Not found' }, 404);
1879
+ }
1880
+
1881
+ // Add CORS headers to response
1882
+ const headers = new Headers(response.headers);
1883
+ Object.entries(corsHeaders).forEach(([key, value]) => headers.set(key, value));
1884
+
1885
+ return new Response(response.body, {
1886
+ status: response.status,
1887
+ headers,
1888
+ });
1889
+ } catch (error) {
1890
+ // Global error handler with full context
1891
+ const errorMessage = error instanceof Error ? error.message : String(error);
1892
+ const errorStack = error instanceof Error ? error.stack : undefined;
1893
+
1894
+ log.error('Unhandled error in fetch handler', {
1895
+ error: errorMessage,
1896
+ stack: errorStack,
1897
+ path,
1898
+ method: request.method,
1899
+ traceId: traceContext.traceId,
1900
+ spanId: traceContext.spanId,
1901
+ });
1902
+
1903
+ const errorResponse = jsonResponse(
1904
+ {
1905
+ error: 'Internal server error',
1906
+ traceId: traceContext.traceId,
1907
+ },
1908
+ 500
1909
+ );
1910
+ const headers = new Headers(errorResponse.headers);
1911
+ Object.entries(corsHeaders).forEach(([key, value]) => headers.set(key, value));
1912
+ return new Response(errorResponse.body, { status: errorResponse.status, headers });
1913
+ }
1914
+ },
1915
+ };