@littlebearapps/platform-admin-sdk 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +112 -0
- package/dist/index.d.ts +16 -0
- package/dist/index.js +89 -0
- package/dist/prompts.d.ts +27 -0
- package/dist/prompts.js +80 -0
- package/dist/scaffold.d.ts +5 -0
- package/dist/scaffold.js +65 -0
- package/dist/templates.d.ts +16 -0
- package/dist/templates.js +131 -0
- package/package.json +46 -0
- package/templates/full/migrations/006_pattern_discovery.sql +199 -0
- package/templates/full/migrations/007_notifications_search.sql +127 -0
- package/templates/full/workers/lib/pattern-discovery/ai-prompt.ts +644 -0
- package/templates/full/workers/lib/pattern-discovery/clustering.ts +278 -0
- package/templates/full/workers/lib/pattern-discovery/shadow-evaluation.ts +603 -0
- package/templates/full/workers/lib/pattern-discovery/storage.ts +806 -0
- package/templates/full/workers/lib/pattern-discovery/types.ts +159 -0
- package/templates/full/workers/lib/pattern-discovery/validation.ts +278 -0
- package/templates/full/workers/pattern-discovery.ts +661 -0
- package/templates/full/workers/platform-alert-router.ts +1809 -0
- package/templates/full/workers/platform-notifications.ts +424 -0
- package/templates/full/workers/platform-search.ts +480 -0
- package/templates/full/workers/platform-settings.ts +436 -0
- package/templates/full/wrangler.alert-router.jsonc.hbs +34 -0
- package/templates/full/wrangler.notifications.jsonc.hbs +23 -0
- package/templates/full/wrangler.pattern-discovery.jsonc.hbs +33 -0
- package/templates/full/wrangler.search.jsonc.hbs +16 -0
- package/templates/full/wrangler.settings.jsonc.hbs +23 -0
- package/templates/shared/README.md.hbs +69 -0
- package/templates/shared/config/budgets.yaml.hbs +72 -0
- package/templates/shared/config/services.yaml.hbs +45 -0
- package/templates/shared/migrations/001_core_tables.sql +117 -0
- package/templates/shared/migrations/002_usage_warehouse.sql +830 -0
- package/templates/shared/migrations/003_feature_tracking.sql +250 -0
- package/templates/shared/migrations/004_settings_alerts.sql +452 -0
- package/templates/shared/migrations/seed.sql.hbs +4 -0
- package/templates/shared/package.json.hbs +21 -0
- package/templates/shared/scripts/sync-config.ts +242 -0
- package/templates/shared/tsconfig.json +12 -0
- package/templates/shared/workers/lib/analytics-engine.ts +357 -0
- package/templates/shared/workers/lib/billing.ts +293 -0
- package/templates/shared/workers/lib/circuit-breaker-middleware.ts +25 -0
- package/templates/shared/workers/lib/control.ts +292 -0
- package/templates/shared/workers/lib/economics.ts +368 -0
- package/templates/shared/workers/lib/metrics.ts +103 -0
- package/templates/shared/workers/lib/platform-settings.ts +407 -0
- package/templates/shared/workers/lib/shared/allowances.ts +333 -0
- package/templates/shared/workers/lib/shared/cloudflare.ts +1362 -0
- package/templates/shared/workers/lib/shared/types.ts +58 -0
- package/templates/shared/workers/lib/telemetry-sampling.ts +360 -0
- package/templates/shared/workers/lib/usage/collectors/example.ts +96 -0
- package/templates/shared/workers/lib/usage/collectors/index.ts +128 -0
- package/templates/shared/workers/lib/usage/handlers/audit.ts +306 -0
- package/templates/shared/workers/lib/usage/handlers/backfill.ts +845 -0
- package/templates/shared/workers/lib/usage/handlers/behavioral.ts +429 -0
- package/templates/shared/workers/lib/usage/handlers/data-queries.ts +507 -0
- package/templates/shared/workers/lib/usage/handlers/dlq-admin.ts +364 -0
- package/templates/shared/workers/lib/usage/handlers/health-trends.ts +222 -0
- package/templates/shared/workers/lib/usage/handlers/index.ts +35 -0
- package/templates/shared/workers/lib/usage/handlers/usage-admin.ts +421 -0
- package/templates/shared/workers/lib/usage/handlers/usage-features.ts +1262 -0
- package/templates/shared/workers/lib/usage/handlers/usage-metrics.ts +2420 -0
- package/templates/shared/workers/lib/usage/handlers/usage-settings.ts +610 -0
- package/templates/shared/workers/lib/usage/queue/budget-enforcement.ts +1032 -0
- package/templates/shared/workers/lib/usage/queue/cost-budget-enforcement.ts +128 -0
- package/templates/shared/workers/lib/usage/queue/cost-calculator.ts +77 -0
- package/templates/shared/workers/lib/usage/queue/dlq-handler.ts +161 -0
- package/templates/shared/workers/lib/usage/queue/index.ts +19 -0
- package/templates/shared/workers/lib/usage/queue/telemetry-processor.ts +790 -0
- package/templates/shared/workers/lib/usage/scheduled/anomaly-detection.ts +732 -0
- package/templates/shared/workers/lib/usage/scheduled/data-collection.ts +956 -0
- package/templates/shared/workers/lib/usage/scheduled/error-digest.ts +343 -0
- package/templates/shared/workers/lib/usage/scheduled/index.ts +18 -0
- package/templates/shared/workers/lib/usage/scheduled/rollups.ts +1561 -0
- package/templates/shared/workers/lib/usage/shared/constants.ts +362 -0
- package/templates/shared/workers/lib/usage/shared/index.ts +14 -0
- package/templates/shared/workers/lib/usage/shared/types.ts +1066 -0
- package/templates/shared/workers/lib/usage/shared/utils.ts +795 -0
- package/templates/shared/workers/platform-usage.ts +1915 -0
- package/templates/shared/wrangler.usage.jsonc.hbs +58 -0
- package/templates/standard/migrations/005_error_collection.sql +162 -0
- package/templates/standard/workers/error-collector.ts +2670 -0
- package/templates/standard/workers/lib/error-collector/capture.ts +213 -0
- package/templates/standard/workers/lib/error-collector/digest.ts +448 -0
- package/templates/standard/workers/lib/error-collector/email-health-alerts.ts +262 -0
- package/templates/standard/workers/lib/error-collector/fingerprint.ts +258 -0
- package/templates/standard/workers/lib/error-collector/gap-alerts.ts +293 -0
- package/templates/standard/workers/lib/error-collector/github.ts +329 -0
- package/templates/standard/workers/lib/error-collector/types.ts +262 -0
- package/templates/standard/workers/lib/sentinel/gap-detection.ts +734 -0
- package/templates/standard/workers/lib/shared/slack-alerts.ts +585 -0
- package/templates/standard/workers/platform-sentinel.ts +1744 -0
- package/templates/standard/wrangler.error-collector.jsonc.hbs +44 -0
- package/templates/standard/wrangler.sentinel.jsonc.hbs +45 -0
|
@@ -0,0 +1,956 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Scheduled Data Collection Module
|
|
3
|
+
*
|
|
4
|
+
* Extracted from platform-usage.ts for the scheduled cron job.
|
|
5
|
+
* Contains functions for collecting and persisting usage data from
|
|
6
|
+
* Cloudflare GraphQL, GitHub billing, and third-party providers.
|
|
7
|
+
*
|
|
8
|
+
* Reference: backlog/tasks/task-62 - Platform-Usage-Migration-Subtasks.md (Phase C.1)
|
|
9
|
+
*/
|
|
10
|
+
|
|
11
|
+
import type {
|
|
12
|
+
Env,
|
|
13
|
+
MetricDeltas,
|
|
14
|
+
SamplingMode,
|
|
15
|
+
PlatformPricing,
|
|
16
|
+
GitHubUsageItem,
|
|
17
|
+
GitHubPlanInfo,
|
|
18
|
+
GitHubBillingData,
|
|
19
|
+
GitHubPlanInclusions,
|
|
20
|
+
AnthropicUsageData,
|
|
21
|
+
OpenAIUsageData,
|
|
22
|
+
ResendUsageData,
|
|
23
|
+
ApifyUsageData,
|
|
24
|
+
AccountUsage,
|
|
25
|
+
CostBreakdown,
|
|
26
|
+
QueuesMetrics,
|
|
27
|
+
} from '../shared';
|
|
28
|
+
import { SamplingMode as SamplingModeEnum } from '../shared';
|
|
29
|
+
import { generateId, loadPricing, fetchWithRetry } from '../shared';
|
|
30
|
+
import { createLoggerFromEnv } from '@littlebearapps/platform-consumer-sdk';
|
|
31
|
+
import { identifyProject } from '../../shared/cloudflare';
|
|
32
|
+
|
|
33
|
+
// =============================================================================
|
|
34
|
+
// HOURLY SNAPSHOT PERSISTENCE
|
|
35
|
+
// =============================================================================
|
|
36
|
+
|
|
37
|
+
/**
 * Persist hourly usage snapshot to D1.
 *
 * Stores account-level or project-level usage metrics for a single hour in the
 * `hourly_usage_snapshots` table. Uses delta values when provided for accurate
 * hourly totals (counters), or raw values for gauges (storage metrics).
 *
 * Duplicate (snapshot_hour, project) rows are silently skipped via
 * `ON CONFLICT ... DO NOTHING` — an existing row is never updated.
 *
 * @param env - Worker environment
 * @param snapshotHour - ISO datetime string (YYYY-MM-DDTHH:00:00Z)
 * @param project - Project identifier ('all', or from project_registry)
 * @param data - AccountUsage from GraphQL
 * @param costs - Cost breakdown from calculateMonthlyCosts
 * @param samplingMode - Current sampling mode (numeric enum; see samplingModeStr mapping below)
 * @param workflows - Optional workflow metrics
 * @param aiMetrics - Optional AI metrics (Workers AI, Vectorize)
 * @param queues - Optional queue metrics
 * @param deltas - Optional delta values for accurate hourly metrics
 * @returns Number of D1 writes (always 1, even when the ON CONFLICT clause
 *          causes the insert to be a no-op)
 */
export async function persistHourlySnapshot(
  env: Env,
  snapshotHour: string,
  project: string,
  data: AccountUsage,
  costs: CostBreakdown,
  samplingMode: SamplingMode,
  workflows?: {
    executions: number;
    successes: number;
    failures: number;
    wallTimeMs: number;
    cpuTimeMs: number;
  },
  aiMetrics?: {
    workersAINeurons: number;
    workersAIRequests: number;
    vectorizeQueries: number;
    vectorizeVectorsQueried: number;
  },
  queues?: {
    messagesProduced: number;
    messagesConsumed: number;
  },
  deltas?: MetricDeltas
): Promise<number> {
  const id = generateId();
  // Collection timestamp in epoch seconds (D1 column: collection_timestamp).
  const timestamp = Math.floor(Date.now() / 1000);

  // ==========================================================================
  // METRICS EXTRACTION: Use delta values when provided, else raw cumulative
  // Counters (requests, reads, writes) use deltas; gauges (storage) use raw
  // ==========================================================================

  // Get Workers metrics (data.workers is WorkersMetrics[])
  // Use delta values for counters if provided (for accurate hourly totals)
  const workersRequests =
    deltas?.workers?.requests ?? data.workers.reduce((sum, w) => sum + w.requests, 0);
  const workersErrors =
    deltas?.workers?.errors ?? data.workers.reduce((sum, w) => sum + w.errors, 0);
  const workersCpuTime =
    deltas?.workers?.cpuTimeMs ?? data.workers.reduce((sum, w) => sum + w.cpuTimeMs, 0);
  // Duration percentiles are point-in-time metrics, not cumulative - use raw values.
  // NOTE(review): averaging per-script p50s is an approximation, not a true
  // account-level p50; likewise max-of-p99s is an upper bound, not a real p99.
  const workersDurationP50 =
    data.workers.length > 0
      ? data.workers.reduce((sum, w) => sum + w.duration50thMs, 0) / data.workers.length
      : 0;
  const workersDurationP99 =
    data.workers.length > 0 ? Math.max(...data.workers.map((w) => w.duration99thMs)) : 0;

  // Get D1 metrics (data.d1 is D1Metrics[])
  // Rows read/written are counters (use delta), storage is gauge (use raw)
  const d1RowsRead = deltas?.d1?.rowsRead ?? data.d1.reduce((sum, d) => sum + d.rowsRead, 0);
  const d1RowsWritten =
    deltas?.d1?.rowsWritten ?? data.d1.reduce((sum, d) => sum + d.rowsWritten, 0);
  const d1StorageBytes = data.d1.reduce((sum, d) => sum + (d.storageBytes ?? 0), 0); // Gauge - raw

  // Get KV metrics (data.kv is KVMetrics[])
  // Operations are counters (use delta), storage is gauge (use raw)
  const kvReads = deltas?.kv?.reads ?? data.kv.reduce((sum, k) => sum + k.reads, 0);
  const kvWrites = deltas?.kv?.writes ?? data.kv.reduce((sum, k) => sum + k.writes, 0);
  const kvDeletes = deltas?.kv?.deletes ?? data.kv.reduce((sum, k) => sum + k.deletes, 0);
  const kvListOps = deltas?.kv?.lists ?? data.kv.reduce((sum, k) => sum + k.lists, 0);
  const kvStorageBytes = data.kv.reduce((sum, k) => sum + (k.storageBytes ?? 0), 0); // Gauge - raw

  // Get R2 metrics (data.r2 is R2Metrics[])
  // Operations and egress are counters (use delta), storage is gauge (use raw)
  const r2ClassAOps =
    deltas?.r2?.classAOps ?? data.r2.reduce((sum, r) => sum + r.classAOperations, 0);
  const r2ClassBOps =
    deltas?.r2?.classBOps ?? data.r2.reduce((sum, r) => sum + r.classBOperations, 0);
  const r2StorageBytes = data.r2.reduce((sum, r) => sum + r.storageBytes, 0); // Gauge - raw
  const r2EgressBytes =
    deltas?.r2?.egressBytes ?? data.r2.reduce((sum, r) => sum + r.egressBytes, 0);

  // Get Durable Objects metrics (data.durableObjects is DOMetrics - single object, not array)
  // Requests, gbSeconds, and storage operations are counters (use delta)
  // Storage bytes is a gauge (use raw)
  const doRequests = deltas?.do?.requests ?? data.durableObjects.requests;
  const doGbSeconds = deltas?.do?.gbSeconds ?? data.durableObjects.gbSeconds;
  const doWsConnections = 0; // Not in DOMetrics interface
  const doStorageReads = deltas?.do?.storageReadUnits ?? data.durableObjects.storageReadUnits;
  const doStorageWrites = deltas?.do?.storageWriteUnits ?? data.durableObjects.storageWriteUnits;
  const doStorageDeletes = deltas?.do?.storageDeleteUnits ?? data.durableObjects.storageDeleteUnits;
  const doStorageBytes = data.durableObjects.storageBytes; // Gauge - raw (GB-months billing)

  // Get Vectorize metrics (data.vectorize is VectorizeInfo[] from REST, aiMetrics from GraphQL)
  // Queries are counters (use delta), stored dimensions is a gauge (use raw)
  const vectorizeQueries = deltas?.vectorize?.queries ?? aiMetrics?.vectorizeQueries ?? 0;
  // IMPORTANT: For billing, Cloudflare charges per stored dimension (vectorCount * dimensions per index)
  // We store total stored dimensions in vectorize_vectors_stored, and set vectorize_dimensions to 1
  // This ensures correct billing calculations: total_stored_dimensions * 1 = total_stored_dimensions
  // vectorizeVectorsStored is a gauge (current storage), not a counter
  const vectorizeVectorsStored = data.vectorize.reduce(
    (sum, v) => sum + v.vectorCount * v.dimensions,
    0
  );
  const vectorizeDimensions = 1; // Multiplier is 1 since vectorizeVectorsStored now holds total stored dimensions

  // Get AI Gateway metrics (data.aiGateway is AIGatewayMetrics[])
  // All AI Gateway metrics are counters (use delta)
  const aigRequests =
    deltas?.aiGateway?.requests ?? data.aiGateway.reduce((sum, a) => sum + a.totalRequests, 0);
  const aigTokensIn = deltas?.aiGateway?.tokensIn ?? 0; // Tokens in not available separately
  // Fallback uses totalTokens (in + out combined), not a true "out" figure.
  const aigTokensOut =
    deltas?.aiGateway?.tokensOut ?? data.aiGateway.reduce((sum, a) => sum + a.totalTokens, 0);
  const aigCached =
    deltas?.aiGateway?.cached ?? data.aiGateway.reduce((sum, a) => sum + a.cachedRequests, 0);

  // Get Pages metrics (data.pages is PagesMetrics[] - deployments only, no request/bandwidth data)
  // Deployments are all-time counts (use delta for hourly change)
  const pagesDeployments =
    deltas?.pages?.deployments ?? data.pages.reduce((sum, p) => sum + p.totalBuilds, 0);
  const pagesBandwidth = deltas?.pages?.bandwidthBytes ?? 0;

  // Queues metrics (from GraphQL queueConsumerMetricsAdaptiveGroups + queueMessageOperationsAdaptiveGroups)
  // Message counts are counters (use delta)
  const queuesProduced = deltas?.queues?.produced ?? queues?.messagesProduced ?? 0;
  const queuesConsumed = deltas?.queues?.consumed ?? queues?.messagesConsumed ?? 0;

  // Workers AI metrics now come from GraphQL (aiInferenceAdaptive dataset)
  // Requests and neurons are counters (use delta)
  const workersAIRequests = deltas?.workersAI?.requests ?? aiMetrics?.workersAIRequests ?? 0;
  const workersAINeurons = deltas?.workersAI?.neurons ?? aiMetrics?.workersAINeurons ?? 0;
  // Calculate Workers AI cost from neurons if not provided in costs (costs.workersAI is always 0 from calculateMonthlyCosts)
  // Load pricing from KV (with fallback to defaults) - pricing is cached per-request
  const pricing = await loadPricing(env);
  // NOTE(review): despite its name, `neuronsPerUsd` holds USD *per neuron*
  // (price-per-thousand / 1000). The multiplication below is therefore
  // correct (neurons * USD/neuron = USD); only the variable name is inverted.
  const neuronsPerUsd = pricing.workersAI.neuronsPerThousand / 1000; // $0.011 per 1000 = $0.000011 per neuron
  const workersAICost = costs.workersAI > 0 ? costs.workersAI : workersAINeurons * neuronsPerUsd;
  const persistLog = createLoggerFromEnv(env, 'platform-usage', 'platform:usage:persist');
  persistLog.info(
    `WorkersAI: neurons=${workersAINeurons}, costs.workersAI=${costs.workersAI}, calculatedCost=${workersAICost}`,
    { tag: 'PERSIST' }
  );

  // Calculate total cost now that workersAICost is defined.
  // NOTE(review): costs.workflows is bound to workflows_cost_usd below but is
  // NOT included in this total — confirm whether that omission is intentional.
  const totalCost =
    costs.workers +
    costs.d1 +
    costs.kv +
    costs.r2 +
    costs.durableObjects +
    costs.vectorize +
    costs.aiGateway +
    costs.pages +
    costs.queues +
    workersAICost;

  // Get Workflows metrics (passed in separately as not part of AccountUsage)
  // All workflow metrics are counters (use delta)
  const workflowsExecutions = deltas?.workflows?.executions ?? workflows?.executions ?? 0;
  const workflowsSuccesses = deltas?.workflows?.successes ?? workflows?.successes ?? 0;
  const workflowsFailures = deltas?.workflows?.failures ?? workflows?.failures ?? 0;
  const workflowsWallTimeMs = deltas?.workflows?.wallTimeMs ?? workflows?.wallTimeMs ?? 0;
  const workflowsCpuTimeMs = deltas?.workflows?.cpuTimeMs ?? workflows?.cpuTimeMs ?? 0;

  // Map the numeric SamplingMode enum to its string label for storage.
  // NOTE(review): this sparse array assumes enum values 0 (FULL), 1 (HALF),
  // 4 (QUARTER), and 24 (MINIMAL); any other value falls through the '' gaps
  // to the `|| 'FULL'` default. Fragile — verify against the SamplingMode
  // declaration in ../shared before changing enum values.
  const samplingModeStr =
    [
      'FULL',
      'HALF',
      '',
      '',
      'QUARTER',
      '',
      '',
      '',
      '',
      '',
      '',
      '',
      '',
      '',
      '',
      '',
      '',
      '',
      '',
      '',
      '',
      '',
      '',
      'MINIMAL',
    ][samplingMode] || 'FULL';

  // Single INSERT with 59 positional placeholders. The .bind() argument order
  // below MUST match the column list here exactly — keep them in lockstep.
  await env.PLATFORM_DB.prepare(
    `
    INSERT INTO hourly_usage_snapshots (
      id, snapshot_hour, project,
      workers_requests, workers_errors, workers_cpu_time_ms,
      workers_duration_p50_ms, workers_duration_p99_ms, workers_cost_usd,
      d1_rows_read, d1_rows_written, d1_storage_bytes, d1_cost_usd,
      kv_reads, kv_writes, kv_deletes, kv_list_ops, kv_storage_bytes, kv_cost_usd,
      r2_class_a_ops, r2_class_b_ops, r2_storage_bytes, r2_egress_bytes, r2_cost_usd,
      do_requests, do_gb_seconds, do_websocket_connections, do_storage_reads, do_storage_writes, do_storage_deletes, do_storage_bytes, do_cost_usd,
      vectorize_queries, vectorize_vectors_stored, vectorize_dimensions, vectorize_cost_usd,
      aigateway_requests, aigateway_tokens_in, aigateway_tokens_out, aigateway_cached_requests, aigateway_cost_usd,
      pages_deployments, pages_bandwidth_bytes, pages_cost_usd,
      queues_messages_produced, queues_messages_consumed, queues_cost_usd,
      workersai_requests, workersai_neurons, workersai_cost_usd,
      workflows_executions, workflows_successes, workflows_failures, workflows_wall_time_ms, workflows_cpu_time_ms, workflows_cost_usd,
      total_cost_usd, collection_timestamp, sampling_mode
    ) VALUES (
      ?, ?, ?,
      ?, ?, ?, ?, ?, ?,
      ?, ?, ?, ?,
      ?, ?, ?, ?, ?, ?,
      ?, ?, ?, ?, ?,
      ?, ?, ?, ?, ?, ?, ?, ?,
      ?, ?, ?, ?,
      ?, ?, ?, ?, ?,
      ?, ?, ?,
      ?, ?, ?,
      ?, ?, ?,
      ?, ?, ?, ?, ?, ?,
      ?, ?, ?
    )
    ON CONFLICT (snapshot_hour, project) DO NOTHING
    `
  )
    .bind(
      id,
      snapshotHour,
      project,
      workersRequests,
      workersErrors,
      workersCpuTime,
      workersDurationP50,
      workersDurationP99,
      costs.workers,
      d1RowsRead,
      d1RowsWritten,
      d1StorageBytes,
      costs.d1,
      kvReads,
      kvWrites,
      kvDeletes,
      kvListOps,
      kvStorageBytes,
      costs.kv,
      r2ClassAOps,
      r2ClassBOps,
      r2StorageBytes,
      r2EgressBytes,
      costs.r2,
      doRequests,
      doGbSeconds,
      doWsConnections,
      doStorageReads,
      doStorageWrites,
      doStorageDeletes,
      doStorageBytes,
      costs.durableObjects,
      vectorizeQueries,
      vectorizeVectorsStored,
      vectorizeDimensions,
      costs.vectorize,
      aigRequests,
      aigTokensIn,
      aigTokensOut,
      aigCached,
      costs.aiGateway,
      pagesDeployments,
      pagesBandwidth,
      costs.pages,
      queuesProduced,
      queuesConsumed,
      costs.queues,
      workersAIRequests,
      workersAINeurons,
      workersAICost,
      workflowsExecutions,
      workflowsSuccesses,
      workflowsFailures,
      workflowsWallTimeMs,
      workflowsCpuTimeMs,
      costs.workflows,
      totalCost,
      timestamp,
      samplingModeStr
    )
    .run();

  // Return approximate write count: always 1 for this statement, even when
  // ON CONFLICT makes the insert a no-op (an existing row is never updated).
  return 1;
}
|
|
342
|
+
|
|
343
|
+
// =============================================================================
|
|
344
|
+
// RESOURCE-LEVEL SNAPSHOT PERSISTENCE
|
|
345
|
+
// =============================================================================
|
|
346
|
+
|
|
347
|
+
/**
 * Resource row for batch inserts into resource_usage_snapshots.
 *
 * One row per (snapshot hour, resource). Columns are a superset across all
 * resource types — fields that do not apply to a given resource type are null.
 * Field names use snake_case to mirror the D1 column names directly.
 */
interface ResourceRow {
  id: string; // Deterministic per (hour, type, resource) — see genId in persistResourceUsageSnapshots
  snapshot_hour: string; // ISO datetime string for the hour
  resource_type: string; // e.g. 'worker', 'd1', 'kv', 'r2', 'do', 'vectorize', 'queues'
  resource_id: string; // Provider-side identifier (script name, database ID, namespace ID, ...)
  resource_name: string | null; // Human-readable name when distinct from resource_id
  project: string; // Project identifier ('unknown' when not resolvable)
  requests: number | null; // Workers/DO/queues request-like counters
  cpu_time_ms: number | null; // Workers CPU time
  wall_time_ms: number | null;
  duration_ms: number | null;
  gb_seconds: number | null; // DO duration billing unit (may be proportionally allocated)
  storage_bytes: number | null; // Storage gauge (raw, or estimated for DO/Vectorize)
  reads: number | null; // KV reads / queue messages consumed
  writes: number | null; // KV writes / queue messages produced
  deletes: number | null; // KV deletes
  rows_read: number | null; // D1 only
  rows_written: number | null; // D1 only
  class_a_ops: number | null; // R2 only
  class_b_ops: number | null; // R2 only
  egress_bytes: number | null; // R2 only
  neurons: number | null; // Workers AI only
  cost_usd: number | null; // Null here — cost is calculated at project level
  source: string; // 'live' for direct metrics, 'estimated' for allocated ones
  confidence: number; // 100 for live data, lower for estimates
  allocation_basis: string | null; // How an estimate was derived, when source === 'estimated'
  ingested_at: string; // ISO timestamp of this collection run
}
|
|
378
|
+
|
|
379
|
+
/**
|
|
380
|
+
* Persist resource-level usage snapshots for multi-level aggregation.
|
|
381
|
+
*
|
|
382
|
+
* Stores per-resource metrics in resource_usage_snapshots table, enabling:
|
|
383
|
+
* - Account-level aggregation (SUM all resources)
|
|
384
|
+
* - Per-CF-tool aggregation (GROUP BY resource_type)
|
|
385
|
+
* - Per-project aggregation (GROUP BY project)
|
|
386
|
+
* - Per-project-per-tool aggregation (GROUP BY project, resource_type)
|
|
387
|
+
*
|
|
388
|
+
* @param env - Worker environment
|
|
389
|
+
* @param snapshotHour - ISO datetime string for the hour
|
|
390
|
+
* @param usage - AccountUsage from GraphQL
|
|
391
|
+
* @param queuesData - Queues metrics
|
|
392
|
+
* @param workflowsData - Workflows metrics with byWorkflow breakdown
|
|
393
|
+
* @returns Number of D1 writes
|
|
394
|
+
*/
|
|
395
|
+
export async function persistResourceUsageSnapshots(
|
|
396
|
+
env: Env,
|
|
397
|
+
snapshotHour: string,
|
|
398
|
+
usage: AccountUsage,
|
|
399
|
+
queuesData: QueuesMetrics[],
|
|
400
|
+
workflowsData: {
|
|
401
|
+
byWorkflow: Array<{
|
|
402
|
+
workflowName: string;
|
|
403
|
+
executions: number;
|
|
404
|
+
successes: number;
|
|
405
|
+
failures: number;
|
|
406
|
+
wallTimeMs: number;
|
|
407
|
+
cpuTimeMs: number;
|
|
408
|
+
}>;
|
|
409
|
+
}
|
|
410
|
+
): Promise<number> {
|
|
411
|
+
let writeCount = 0;
|
|
412
|
+
const ingestedAt = new Date().toISOString();
|
|
413
|
+
|
|
414
|
+
// Helper to generate unique ID
|
|
415
|
+
const genId = (type: string, resourceId: string) =>
|
|
416
|
+
`${snapshotHour.replace(/[:-]/g, '').slice(0, 13)}-${type}-${resourceId.slice(0, 50)}`;
|
|
417
|
+
|
|
418
|
+
// Helper to identify project for a resource
|
|
419
|
+
const getProject = (_resourceType: string, resourceId: string): string => {
|
|
420
|
+
return identifyProject(resourceId) || 'unknown';
|
|
421
|
+
};
|
|
422
|
+
|
|
423
|
+
const rows: ResourceRow[] = [];
|
|
424
|
+
|
|
425
|
+
// 1. Workers - per scriptName
|
|
426
|
+
for (const w of usage.workers) {
|
|
427
|
+
const project = getProject('worker', w.scriptName);
|
|
428
|
+
rows.push({
|
|
429
|
+
id: genId('worker', w.scriptName),
|
|
430
|
+
snapshot_hour: snapshotHour,
|
|
431
|
+
resource_type: 'worker',
|
|
432
|
+
resource_id: w.scriptName,
|
|
433
|
+
resource_name: w.scriptName,
|
|
434
|
+
project,
|
|
435
|
+
requests: w.requests,
|
|
436
|
+
cpu_time_ms: w.cpuTimeMs,
|
|
437
|
+
wall_time_ms: null,
|
|
438
|
+
duration_ms: null,
|
|
439
|
+
gb_seconds: null,
|
|
440
|
+
storage_bytes: null,
|
|
441
|
+
reads: null,
|
|
442
|
+
writes: null,
|
|
443
|
+
deletes: null,
|
|
444
|
+
rows_read: null,
|
|
445
|
+
rows_written: null,
|
|
446
|
+
class_a_ops: null,
|
|
447
|
+
class_b_ops: null,
|
|
448
|
+
egress_bytes: null,
|
|
449
|
+
neurons: null,
|
|
450
|
+
cost_usd: null, // Cost calculated at project level
|
|
451
|
+
source: 'live',
|
|
452
|
+
confidence: 100,
|
|
453
|
+
allocation_basis: null,
|
|
454
|
+
ingested_at: ingestedAt,
|
|
455
|
+
});
|
|
456
|
+
}
|
|
457
|
+
|
|
458
|
+
// 2. D1 - per databaseId
|
|
459
|
+
for (const db of usage.d1) {
|
|
460
|
+
const project = getProject('d1', db.databaseName);
|
|
461
|
+
rows.push({
|
|
462
|
+
id: genId('d1', db.databaseId),
|
|
463
|
+
snapshot_hour: snapshotHour,
|
|
464
|
+
resource_type: 'd1',
|
|
465
|
+
resource_id: db.databaseId,
|
|
466
|
+
resource_name: db.databaseName,
|
|
467
|
+
project,
|
|
468
|
+
requests: null,
|
|
469
|
+
cpu_time_ms: null,
|
|
470
|
+
wall_time_ms: null,
|
|
471
|
+
duration_ms: null,
|
|
472
|
+
gb_seconds: null,
|
|
473
|
+
storage_bytes: db.storageBytes,
|
|
474
|
+
reads: null,
|
|
475
|
+
writes: null,
|
|
476
|
+
deletes: null,
|
|
477
|
+
rows_read: db.rowsRead,
|
|
478
|
+
rows_written: db.rowsWritten,
|
|
479
|
+
class_a_ops: null,
|
|
480
|
+
class_b_ops: null,
|
|
481
|
+
egress_bytes: null,
|
|
482
|
+
neurons: null,
|
|
483
|
+
cost_usd: null,
|
|
484
|
+
source: 'live',
|
|
485
|
+
confidence: 100,
|
|
486
|
+
allocation_basis: null,
|
|
487
|
+
ingested_at: ingestedAt,
|
|
488
|
+
});
|
|
489
|
+
}
|
|
490
|
+
|
|
491
|
+
// 3. KV - per namespaceId
|
|
492
|
+
for (const kv of usage.kv) {
|
|
493
|
+
const project = getProject('kv', kv.namespaceName);
|
|
494
|
+
rows.push({
|
|
495
|
+
id: genId('kv', kv.namespaceId),
|
|
496
|
+
snapshot_hour: snapshotHour,
|
|
497
|
+
resource_type: 'kv',
|
|
498
|
+
resource_id: kv.namespaceId,
|
|
499
|
+
resource_name: kv.namespaceName,
|
|
500
|
+
project,
|
|
501
|
+
requests: null,
|
|
502
|
+
cpu_time_ms: null,
|
|
503
|
+
wall_time_ms: null,
|
|
504
|
+
duration_ms: null,
|
|
505
|
+
gb_seconds: null,
|
|
506
|
+
storage_bytes: kv.storageBytes,
|
|
507
|
+
reads: kv.reads,
|
|
508
|
+
writes: kv.writes,
|
|
509
|
+
deletes: kv.deletes,
|
|
510
|
+
rows_read: null,
|
|
511
|
+
rows_written: null,
|
|
512
|
+
class_a_ops: null,
|
|
513
|
+
class_b_ops: null,
|
|
514
|
+
egress_bytes: null,
|
|
515
|
+
neurons: null,
|
|
516
|
+
cost_usd: null,
|
|
517
|
+
source: 'live',
|
|
518
|
+
confidence: 100,
|
|
519
|
+
allocation_basis: null,
|
|
520
|
+
ingested_at: ingestedAt,
|
|
521
|
+
});
|
|
522
|
+
}
|
|
523
|
+
|
|
524
|
+
// 4. R2 - per bucketName
|
|
525
|
+
for (const r2 of usage.r2) {
|
|
526
|
+
const project = getProject('r2', r2.bucketName);
|
|
527
|
+
rows.push({
|
|
528
|
+
id: genId('r2', r2.bucketName),
|
|
529
|
+
snapshot_hour: snapshotHour,
|
|
530
|
+
resource_type: 'r2',
|
|
531
|
+
resource_id: r2.bucketName,
|
|
532
|
+
resource_name: r2.bucketName,
|
|
533
|
+
project,
|
|
534
|
+
requests: null,
|
|
535
|
+
cpu_time_ms: null,
|
|
536
|
+
wall_time_ms: null,
|
|
537
|
+
duration_ms: null,
|
|
538
|
+
gb_seconds: null,
|
|
539
|
+
storage_bytes: r2.storageBytes,
|
|
540
|
+
reads: null,
|
|
541
|
+
writes: null,
|
|
542
|
+
deletes: null,
|
|
543
|
+
rows_read: null,
|
|
544
|
+
rows_written: null,
|
|
545
|
+
class_a_ops: r2.classAOperations,
|
|
546
|
+
class_b_ops: r2.classBOperations,
|
|
547
|
+
egress_bytes: r2.egressBytes,
|
|
548
|
+
neurons: null,
|
|
549
|
+
cost_usd: null,
|
|
550
|
+
source: 'live',
|
|
551
|
+
confidence: 100,
|
|
552
|
+
allocation_basis: null,
|
|
553
|
+
ingested_at: ingestedAt,
|
|
554
|
+
});
|
|
555
|
+
}
|
|
556
|
+
|
|
557
|
+
// 5. Durable Objects - per scriptName (with proportional allocation for gbSeconds/storageBytes)
|
|
558
|
+
// DO has byScript for requests, but gbSeconds/storageBytes are account-level
|
|
559
|
+
const doByScript = usage.durableObjects.byScript || [];
|
|
560
|
+
const totalDORequests = doByScript.reduce((sum, d) => sum + d.requests, 0);
|
|
561
|
+
const accountGbSeconds = usage.durableObjects.gbSeconds;
|
|
562
|
+
const accountStorageBytes = usage.durableObjects.storageBytes;
|
|
563
|
+
|
|
564
|
+
for (const doScript of doByScript) {
|
|
565
|
+
const project = getProject('do', doScript.scriptName);
|
|
566
|
+
const proportion = totalDORequests > 0 ? doScript.requests / totalDORequests : 0;
|
|
567
|
+
const allocatedGbSeconds = accountGbSeconds * proportion;
|
|
568
|
+
const allocatedStorageBytes = accountStorageBytes * proportion;
|
|
569
|
+
const isEstimated = totalDORequests > 0 && (accountGbSeconds > 0 || accountStorageBytes > 0);
|
|
570
|
+
|
|
571
|
+
rows.push({
|
|
572
|
+
id: genId('do', doScript.scriptName),
|
|
573
|
+
snapshot_hour: snapshotHour,
|
|
574
|
+
resource_type: 'do',
|
|
575
|
+
resource_id: doScript.scriptName,
|
|
576
|
+
resource_name: doScript.scriptName,
|
|
577
|
+
project,
|
|
578
|
+
requests: doScript.requests,
|
|
579
|
+
cpu_time_ms: null,
|
|
580
|
+
wall_time_ms: null,
|
|
581
|
+
duration_ms: null,
|
|
582
|
+
gb_seconds: allocatedGbSeconds,
|
|
583
|
+
storage_bytes: Math.round(allocatedStorageBytes),
|
|
584
|
+
reads: null,
|
|
585
|
+
writes: null,
|
|
586
|
+
deletes: null,
|
|
587
|
+
rows_read: null,
|
|
588
|
+
rows_written: null,
|
|
589
|
+
class_a_ops: null,
|
|
590
|
+
class_b_ops: null,
|
|
591
|
+
egress_bytes: null,
|
|
592
|
+
neurons: null,
|
|
593
|
+
cost_usd: null,
|
|
594
|
+
source: isEstimated ? 'estimated' : 'live',
|
|
595
|
+
confidence: isEstimated ? 80 : 100,
|
|
596
|
+
allocation_basis: isEstimated
|
|
597
|
+
? `proportional_by_requests (${(proportion * 100).toFixed(1)}%)`
|
|
598
|
+
: null,
|
|
599
|
+
ingested_at: ingestedAt,
|
|
600
|
+
});
|
|
601
|
+
}
|
|
602
|
+
|
|
603
|
+
// 6. Vectorize - per index name
|
|
604
|
+
for (const v of usage.vectorize) {
|
|
605
|
+
const project = getProject('vectorize', v.name);
|
|
606
|
+
rows.push({
|
|
607
|
+
id: genId('vectorize', v.name),
|
|
608
|
+
snapshot_hour: snapshotHour,
|
|
609
|
+
resource_type: 'vectorize',
|
|
610
|
+
resource_id: v.name,
|
|
611
|
+
resource_name: v.name,
|
|
612
|
+
project,
|
|
613
|
+
requests: null, // VectorizeInfo doesn't have query counts, just storage info
|
|
614
|
+
cpu_time_ms: null,
|
|
615
|
+
wall_time_ms: null,
|
|
616
|
+
duration_ms: null,
|
|
617
|
+
gb_seconds: null,
|
|
618
|
+
storage_bytes: v.vectorCount * v.dimensions * 4, // Approximate: float32 vectors
|
|
619
|
+
reads: null,
|
|
620
|
+
writes: null,
|
|
621
|
+
deletes: null,
|
|
622
|
+
rows_read: null,
|
|
623
|
+
rows_written: null,
|
|
624
|
+
class_a_ops: null,
|
|
625
|
+
class_b_ops: null,
|
|
626
|
+
egress_bytes: null,
|
|
627
|
+
neurons: null,
|
|
628
|
+
cost_usd: null,
|
|
629
|
+
source: 'live',
|
|
630
|
+
confidence: 100,
|
|
631
|
+
allocation_basis: null,
|
|
632
|
+
ingested_at: ingestedAt,
|
|
633
|
+
});
|
|
634
|
+
}
|
|
635
|
+
|
|
636
|
+
// 7. Queues - per queueId
|
|
637
|
+
for (const q of queuesData) {
|
|
638
|
+
const project = getProject('queues', q.queueName);
|
|
639
|
+
rows.push({
|
|
640
|
+
id: genId('queues', q.queueId),
|
|
641
|
+
snapshot_hour: snapshotHour,
|
|
642
|
+
resource_type: 'queues',
|
|
643
|
+
resource_id: q.queueId,
|
|
644
|
+
resource_name: q.queueName,
|
|
645
|
+
project,
|
|
646
|
+
requests: q.messagesProduced + q.messagesConsumed,
|
|
647
|
+
cpu_time_ms: null,
|
|
648
|
+
wall_time_ms: null,
|
|
649
|
+
duration_ms: null,
|
|
650
|
+
gb_seconds: null,
|
|
651
|
+
storage_bytes: null,
|
|
652
|
+
reads: q.messagesConsumed,
|
|
653
|
+
writes: q.messagesProduced,
|
|
654
|
+
deletes: null,
|
|
655
|
+
rows_read: null,
|
|
656
|
+
rows_written: null,
|
|
657
|
+
class_a_ops: null,
|
|
658
|
+
class_b_ops: null,
|
|
659
|
+
egress_bytes: null,
|
|
660
|
+
neurons: null,
|
|
661
|
+
cost_usd: null,
|
|
662
|
+
source: 'live',
|
|
663
|
+
confidence: 100,
|
|
664
|
+
allocation_basis: null,
|
|
665
|
+
ingested_at: ingestedAt,
|
|
666
|
+
});
|
|
667
|
+
}
|
|
668
|
+
|
|
669
|
+
// 8. Workflows - per workflowName
|
|
670
|
+
for (const wf of workflowsData.byWorkflow) {
|
|
671
|
+
const project = getProject('workflows', wf.workflowName);
|
|
672
|
+
rows.push({
|
|
673
|
+
id: genId('workflows', wf.workflowName),
|
|
674
|
+
snapshot_hour: snapshotHour,
|
|
675
|
+
resource_type: 'workflows',
|
|
676
|
+
resource_id: wf.workflowName,
|
|
677
|
+
resource_name: wf.workflowName,
|
|
678
|
+
project,
|
|
679
|
+
requests: wf.executions,
|
|
680
|
+
cpu_time_ms: wf.cpuTimeMs,
|
|
681
|
+
wall_time_ms: wf.wallTimeMs,
|
|
682
|
+
duration_ms: null,
|
|
683
|
+
gb_seconds: null,
|
|
684
|
+
storage_bytes: null,
|
|
685
|
+
reads: null,
|
|
686
|
+
writes: null,
|
|
687
|
+
deletes: null,
|
|
688
|
+
rows_read: null,
|
|
689
|
+
rows_written: null,
|
|
690
|
+
class_a_ops: null,
|
|
691
|
+
class_b_ops: null,
|
|
692
|
+
egress_bytes: null,
|
|
693
|
+
neurons: null,
|
|
694
|
+
cost_usd: null,
|
|
695
|
+
source: 'live',
|
|
696
|
+
confidence: 100,
|
|
697
|
+
allocation_basis: null,
|
|
698
|
+
ingested_at: ingestedAt,
|
|
699
|
+
});
|
|
700
|
+
}
|
|
701
|
+
|
|
702
|
+
// 9. AI Gateway - per gatewayId
|
|
703
|
+
for (const gw of usage.aiGateway) {
|
|
704
|
+
const project = getProject('aigateway', gw.gatewayId);
|
|
705
|
+
rows.push({
|
|
706
|
+
id: genId('aigateway', gw.gatewayId),
|
|
707
|
+
snapshot_hour: snapshotHour,
|
|
708
|
+
resource_type: 'aigateway',
|
|
709
|
+
resource_id: gw.gatewayId,
|
|
710
|
+
resource_name: gw.gatewayId,
|
|
711
|
+
project,
|
|
712
|
+
requests: gw.totalRequests,
|
|
713
|
+
cpu_time_ms: null,
|
|
714
|
+
wall_time_ms: null,
|
|
715
|
+
duration_ms: null,
|
|
716
|
+
gb_seconds: null,
|
|
717
|
+
storage_bytes: null,
|
|
718
|
+
reads: null,
|
|
719
|
+
writes: null,
|
|
720
|
+
deletes: null,
|
|
721
|
+
rows_read: null,
|
|
722
|
+
rows_written: null,
|
|
723
|
+
class_a_ops: null,
|
|
724
|
+
class_b_ops: null,
|
|
725
|
+
egress_bytes: null,
|
|
726
|
+
neurons: null,
|
|
727
|
+
cost_usd: null,
|
|
728
|
+
source: 'live',
|
|
729
|
+
confidence: 100,
|
|
730
|
+
allocation_basis: null,
|
|
731
|
+
ingested_at: ingestedAt,
|
|
732
|
+
});
|
|
733
|
+
}
|
|
734
|
+
|
|
735
|
+
// 10. Pages - per project (from pages array)
|
|
736
|
+
// PagesMetrics tracks deployments/builds, not requests/bandwidth
|
|
737
|
+
for (const pg of usage.pages) {
|
|
738
|
+
const project = getProject('pages', pg.projectName);
|
|
739
|
+
rows.push({
|
|
740
|
+
id: genId('pages', pg.projectName),
|
|
741
|
+
snapshot_hour: snapshotHour,
|
|
742
|
+
resource_type: 'pages',
|
|
743
|
+
resource_id: pg.projectName,
|
|
744
|
+
resource_name: pg.projectName,
|
|
745
|
+
project,
|
|
746
|
+
requests: pg.totalBuilds, // Use totalBuilds as a proxy for activity
|
|
747
|
+
cpu_time_ms: null,
|
|
748
|
+
wall_time_ms: null,
|
|
749
|
+
duration_ms: null,
|
|
750
|
+
gb_seconds: null,
|
|
751
|
+
storage_bytes: null,
|
|
752
|
+
reads: null,
|
|
753
|
+
writes: pg.productionDeployments + pg.previewDeployments, // Total deployments
|
|
754
|
+
deletes: null,
|
|
755
|
+
rows_read: null,
|
|
756
|
+
rows_written: null,
|
|
757
|
+
class_a_ops: null,
|
|
758
|
+
class_b_ops: null,
|
|
759
|
+
egress_bytes: null, // Not available in PagesMetrics
|
|
760
|
+
neurons: null,
|
|
761
|
+
cost_usd: null,
|
|
762
|
+
source: 'live',
|
|
763
|
+
confidence: 100,
|
|
764
|
+
allocation_basis: null,
|
|
765
|
+
ingested_at: ingestedAt,
|
|
766
|
+
});
|
|
767
|
+
}
|
|
768
|
+
|
|
769
|
+
// Batch insert using D1 batch API — reduces ~200 write transactions/hr to ~8
|
|
770
|
+
const RESOURCE_UPSERT_SQL = `INSERT INTO resource_usage_snapshots (
|
|
771
|
+
id, snapshot_hour, resource_type, resource_id, resource_name, project,
|
|
772
|
+
requests, cpu_time_ms, wall_time_ms, duration_ms, gb_seconds, storage_bytes,
|
|
773
|
+
reads, writes, deletes, rows_read, rows_written, class_a_ops, class_b_ops,
|
|
774
|
+
egress_bytes, neurons, cost_usd, source, confidence, allocation_basis, ingested_at
|
|
775
|
+
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
|
776
|
+
ON CONFLICT (snapshot_hour, resource_type, resource_id) DO UPDATE SET
|
|
777
|
+
resource_name = excluded.resource_name,
|
|
778
|
+
project = excluded.project,
|
|
779
|
+
requests = COALESCE(excluded.requests, resource_usage_snapshots.requests),
|
|
780
|
+
cpu_time_ms = COALESCE(excluded.cpu_time_ms, resource_usage_snapshots.cpu_time_ms),
|
|
781
|
+
wall_time_ms = COALESCE(excluded.wall_time_ms, resource_usage_snapshots.wall_time_ms),
|
|
782
|
+
duration_ms = COALESCE(excluded.duration_ms, resource_usage_snapshots.duration_ms),
|
|
783
|
+
gb_seconds = COALESCE(excluded.gb_seconds, resource_usage_snapshots.gb_seconds),
|
|
784
|
+
storage_bytes = COALESCE(excluded.storage_bytes, resource_usage_snapshots.storage_bytes),
|
|
785
|
+
reads = COALESCE(excluded.reads, resource_usage_snapshots.reads),
|
|
786
|
+
writes = COALESCE(excluded.writes, resource_usage_snapshots.writes),
|
|
787
|
+
deletes = COALESCE(excluded.deletes, resource_usage_snapshots.deletes),
|
|
788
|
+
rows_read = COALESCE(excluded.rows_read, resource_usage_snapshots.rows_read),
|
|
789
|
+
rows_written = COALESCE(excluded.rows_written, resource_usage_snapshots.rows_written),
|
|
790
|
+
class_a_ops = COALESCE(excluded.class_a_ops, resource_usage_snapshots.class_a_ops),
|
|
791
|
+
class_b_ops = COALESCE(excluded.class_b_ops, resource_usage_snapshots.class_b_ops),
|
|
792
|
+
egress_bytes = COALESCE(excluded.egress_bytes, resource_usage_snapshots.egress_bytes),
|
|
793
|
+
neurons = COALESCE(excluded.neurons, resource_usage_snapshots.neurons),
|
|
794
|
+
cost_usd = excluded.cost_usd,
|
|
795
|
+
source = CASE
|
|
796
|
+
WHEN excluded.confidence > resource_usage_snapshots.confidence THEN excluded.source
|
|
797
|
+
ELSE resource_usage_snapshots.source
|
|
798
|
+
END,
|
|
799
|
+
confidence = MAX(resource_usage_snapshots.confidence, excluded.confidence),
|
|
800
|
+
allocation_basis = excluded.allocation_basis,
|
|
801
|
+
ingested_at = excluded.ingested_at`;
|
|
802
|
+
|
|
803
|
+
const bindRow = (row: typeof rows[number]): D1PreparedStatement =>
|
|
804
|
+
env.PLATFORM_DB.prepare(RESOURCE_UPSERT_SQL).bind(
|
|
805
|
+
row.id, row.snapshot_hour, row.resource_type, row.resource_id,
|
|
806
|
+
row.resource_name, row.project, row.requests, row.cpu_time_ms,
|
|
807
|
+
row.wall_time_ms, row.duration_ms, row.gb_seconds, row.storage_bytes,
|
|
808
|
+
row.reads, row.writes, row.deletes, row.rows_read, row.rows_written,
|
|
809
|
+
row.class_a_ops, row.class_b_ops, row.egress_bytes, row.neurons,
|
|
810
|
+
row.cost_usd, row.source, row.confidence, row.allocation_basis, row.ingested_at
|
|
811
|
+
);
|
|
812
|
+
|
|
813
|
+
const BATCH_SIZE = 25;
|
|
814
|
+
for (let i = 0; i < rows.length; i += BATCH_SIZE) {
|
|
815
|
+
const chunk = rows.slice(i, i + BATCH_SIZE);
|
|
816
|
+
try {
|
|
817
|
+
const statements = chunk.map(bindRow);
|
|
818
|
+
await env.PLATFORM_DB.batch(statements);
|
|
819
|
+
writeCount += chunk.length;
|
|
820
|
+
} catch (error) {
|
|
821
|
+
// Fallback: try individual inserts for the failed batch
|
|
822
|
+
const batchLog = createLoggerFromEnv(env, 'platform-usage', 'platform:usage:resource-snapshot');
|
|
823
|
+
batchLog.error(
|
|
824
|
+
`Batch insert failed for ${chunk.length} rows, falling back to individual`,
|
|
825
|
+
error instanceof Error ? error : new Error(String(error))
|
|
826
|
+
);
|
|
827
|
+
for (const row of chunk) {
|
|
828
|
+
try {
|
|
829
|
+
await bindRow(row).run();
|
|
830
|
+
writeCount++;
|
|
831
|
+
} catch (individualError) {
|
|
832
|
+
batchLog.error(
|
|
833
|
+
`Failed to insert ${row.resource_type}/${row.resource_id}`,
|
|
834
|
+
individualError instanceof Error ? individualError : new Error(String(individualError))
|
|
835
|
+
);
|
|
836
|
+
}
|
|
837
|
+
}
|
|
838
|
+
}
|
|
839
|
+
}
|
|
840
|
+
|
|
841
|
+
const resourceLog = createLoggerFromEnv(
|
|
842
|
+
env,
|
|
843
|
+
'platform-usage',
|
|
844
|
+
'platform:usage:resource-snapshot'
|
|
845
|
+
);
|
|
846
|
+
resourceLog.info(`Persisted ${writeCount} resource-level snapshots for ${snapshotHour}`, {
|
|
847
|
+
tag: 'RESOURCE-SNAPSHOT',
|
|
848
|
+
});
|
|
849
|
+
return writeCount;
|
|
850
|
+
}
|
|
851
|
+
|
|
852
|
+
// =============================================================================
|
|
853
|
+
// EXTERNAL BILLING COLLECTORS (Re-exported from collectors module)
|
|
854
|
+
// =============================================================================
|
|
855
|
+
|
|
856
|
+
// Re-export the unified collector framework
|
|
857
|
+
// See workers/lib/usage/collectors/ for the collector interface and example
|
|
858
|
+
export {
|
|
859
|
+
collectExternalMetrics,
|
|
860
|
+
type ExternalMetrics,
|
|
861
|
+
type ExternalCollector,
|
|
862
|
+
} from '../collectors';
|
|
863
|
+
|
|
864
|
+
// TODO: Add re-exports for your custom collectors here.
|
|
865
|
+
// See workers/lib/usage/collectors/example.ts for the collector template.
|
|
866
|
+
|
|
867
|
+
// =============================================================================
// THIRD-PARTY USAGE PERSISTENCE
|
|
868
|
+
// =============================================================================
|
|
869
|
+
|
|
870
|
+
/**
|
|
871
|
+
* Persist third-party usage data (GitHub billing, etc.).
|
|
872
|
+
*/
|
|
873
|
+
export async function persistThirdPartyUsage(
|
|
874
|
+
env: Env,
|
|
875
|
+
date: string,
|
|
876
|
+
provider: string,
|
|
877
|
+
resourceType: string,
|
|
878
|
+
usageValue: number,
|
|
879
|
+
usageUnit: string,
|
|
880
|
+
costUsd: number = 0,
|
|
881
|
+
resourceName?: string
|
|
882
|
+
): Promise<void> {
|
|
883
|
+
await env.PLATFORM_DB.prepare(
|
|
884
|
+
`
|
|
885
|
+
INSERT INTO third_party_usage (
|
|
886
|
+
id, snapshot_date, provider, resource_type, resource_name,
|
|
887
|
+
usage_value, usage_unit, cost_usd, collection_timestamp
|
|
888
|
+
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
|
|
889
|
+
ON CONFLICT (snapshot_date, provider, resource_type, COALESCE(resource_name, ''))
|
|
890
|
+
DO UPDATE SET
|
|
891
|
+
usage_value = excluded.usage_value,
|
|
892
|
+
cost_usd = excluded.cost_usd,
|
|
893
|
+
collection_timestamp = excluded.collection_timestamp
|
|
894
|
+
`
|
|
895
|
+
)
|
|
896
|
+
.bind(
|
|
897
|
+
generateId(),
|
|
898
|
+
date,
|
|
899
|
+
provider,
|
|
900
|
+
resourceType,
|
|
901
|
+
resourceName || '',
|
|
902
|
+
usageValue,
|
|
903
|
+
usageUnit,
|
|
904
|
+
costUsd,
|
|
905
|
+
Math.floor(Date.now() / 1000)
|
|
906
|
+
)
|
|
907
|
+
.run();
|
|
908
|
+
}
|
|
909
|
+
|
|
910
|
+
// =============================================================================
|
|
911
|
+
// API TOKEN VALIDATION
|
|
912
|
+
// =============================================================================
|
|
913
|
+
|
|
914
|
+
/**
|
|
915
|
+
* Validate Cloudflare API token by making a simple account API call.
|
|
916
|
+
* Returns account name if valid, null if invalid.
|
|
917
|
+
*/
|
|
918
|
+
export async function validateCloudflareToken(env: Env): Promise<string | null> {
|
|
919
|
+
const log = createLoggerFromEnv(env, 'platform-usage', 'platform:usage:token');
|
|
920
|
+
try {
|
|
921
|
+
const response = await fetchWithRetry(
|
|
922
|
+
`https://api.cloudflare.com/client/v4/accounts/${env.CLOUDFLARE_ACCOUNT_ID}`,
|
|
923
|
+
{
|
|
924
|
+
headers: {
|
|
925
|
+
Authorization: `Bearer ${env.CLOUDFLARE_API_TOKEN}`,
|
|
926
|
+
'Content-Type': 'application/json',
|
|
927
|
+
},
|
|
928
|
+
}
|
|
929
|
+
);
|
|
930
|
+
if (!response.ok) {
|
|
931
|
+
const text = await response.text();
|
|
932
|
+
log.error(`CF API token validation failed: ${response.status} - ${text}`);
|
|
933
|
+
return null;
|
|
934
|
+
}
|
|
935
|
+
const data = (await response.json()) as { success: boolean; result?: { name?: string } };
|
|
936
|
+
if (!data.success) {
|
|
937
|
+
log.error('CF API returned success=false');
|
|
938
|
+
return null;
|
|
939
|
+
}
|
|
940
|
+
const accountName = data.result?.name || 'Unknown';
|
|
941
|
+
log.info('CF API token valid', { accountName });
|
|
942
|
+
return accountName;
|
|
943
|
+
} catch (error) {
|
|
944
|
+
log.error('CF API token validation error', error);
|
|
945
|
+
return null;
|
|
946
|
+
}
|
|
947
|
+
}
|
|
948
|
+
|
|
949
|
+
// =============================================================================
|
|
950
|
+
// EXPORTS
|
|
951
|
+
// =============================================================================
|
|
952
|
+
|
|
953
|
+
export {
|
|
954
|
+
// Re-export types for consumers
|
|
955
|
+
type GitHubPlanInclusions,
|
|
956
|
+
};
|