@mcptoolshop/promo-kit 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +17 -0
- package/LICENSE +21 -0
- package/README.md +132 -0
- package/bin/promo-kit.mjs +150 -0
- package/index.mjs +9 -0
- package/kit.config.example.json +19 -0
- package/package.json +45 -0
- package/scripts/apply-control-patch.mjs +205 -0
- package/scripts/apply-submission-status.mjs +225 -0
- package/scripts/gen-baseline.mjs +402 -0
- package/scripts/gen-decision-drift.mjs +253 -0
- package/scripts/gen-experiment-decisions.mjs +282 -0
- package/scripts/gen-feedback-summary.mjs +278 -0
- package/scripts/gen-promo-decisions.mjs +507 -0
- package/scripts/gen-queue-health.mjs +223 -0
- package/scripts/gen-recommendation-patch.mjs +352 -0
- package/scripts/gen-recommendations.mjs +409 -0
- package/scripts/gen-telemetry-aggregate.mjs +266 -0
- package/scripts/gen-trust-receipt.mjs +184 -0
- package/scripts/kit-bootstrap.mjs +246 -0
- package/scripts/kit-migrate.mjs +111 -0
- package/scripts/kit-selftest.mjs +207 -0
- package/scripts/lib/config.mjs +124 -0
package/scripts/gen-recommendations.mjs
@@ -0,0 +1,409 @@
#!/usr/bin/env node
/**
 * gen-recommendations.mjs
 *
 * Advisory recommendation engine. Analyzes telemetry rollups, queue health,
 * governance data, and lint reports to surface actionable insights.
 *
 * All recommendations are advisory — humans decide, the system proposes.
 *
 * Usage:
 *   node scripts/gen-recommendations.mjs [--dry-run]
 */

import fs from "node:fs";
import path from "node:path";
import { fileURLToPath } from "node:url";
import { getConfig, getRoot } from "./lib/config.mjs";

const __dirname = path.dirname(fileURLToPath(import.meta.url));
const ROOT = getRoot();
const config = getConfig();
const isMain = process.argv[1] && path.resolve(process.argv[1]) === path.resolve(fileURLToPath(import.meta.url));

// ── Valid category enum ─────────────────────────────────────

export const VALID_CATEGORIES = [
  "re-feature",
  "improve-proof",
  "stuck-submission",
  "experiment-graduation",
  "lint-promotion",
];

export const VALID_PRIORITIES = ["high", "medium", "low"];

// ── Signal Computation (pure exports) ───────────────────────

/**
 * Compute Proof Engagement Score per tool from telemetry rollup.
 * ProofEngagement = click_evidence_link + copy_proof_bullets
 * @param {Record<string, Record<string, number>>} bySlug - rollup.bySlug
 * @returns {Record<string, number>} slug → score
 */
export function computeProofEngagementBySlug(bySlug) {
  if (!bySlug || typeof bySlug !== "object") return {};
  const scores = {};
  for (const [slug, counts] of Object.entries(bySlug)) {
    const clicks = counts.click_evidence_link || 0;
    const bullets = counts.copy_proof_bullets || 0;
    const score = clicks + bullets;
    if (score > 0) scores[slug] = score;
  }
  return scores;
}

/**
 * Compute Submission Friction Score per submission.
 * Friction = lint warning count + needs-info penalty (2) + stuck penalty (1 if >7d)
 * @param {object[]} submissions - submissions.json .submissions array
 * @param {Record<string, object>} lintReports - slug → { errors, warnings }
 * @param {{ now?: Date }} opts
 * @returns {Record<string, number>} slug → friction score
 */
export function computeSubmissionFrictionBySlug(submissions, lintReports = {}, opts = {}) {
  if (!Array.isArray(submissions)) return {};
  const now = opts.now || new Date();
  const scores = {};

  for (const sub of submissions) {
    if (!sub.slug) continue;
    const lint = lintReports[sub.slug] || {};
    const warningCount = (lint.warnings || []).length;
    const needsInfoPenalty = sub.status === "needs-info" ? 2 : 0;

    let stuckPenalty = 0;
    if ((sub.status === "pending" || sub.status === "needs-info") && sub.submittedAt) {
      const days = Math.floor((now.getTime() - new Date(sub.submittedAt).getTime()) / (1000 * 60 * 60 * 24));
      if (days > 7) stuckPenalty = 1;
    }

    const score = warningCount + needsInfoPenalty + stuckPenalty;
    if (score > 0) scores[sub.slug] = score;
  }

  return scores;
}

// ── Category Finders ────────────────────────────────────────

/**
 * Find tools with high proof engagement that aren't currently featured.
 * @param {object} signals
 * @param {object} overrides
 * @returns {object[]}
 */
function findHighTrustTools(signals, overrides = {}) {
  const results = [];
  const engagement = signals.proofEngagementBySlug || {};

  const sorted = Object.entries(engagement)
    .sort((a, b) => b[1] - a[1])
    .filter(([slug]) => !overrides[slug]?.featured);

  for (const [slug, score] of sorted.slice(0, 5)) {
    if (score > 5) {
      results.push({
        priority: "high",
        category: "re-feature",
        slug,
        title: `Re-feature ${slug}`,
        insight: `High proof engagement score (${score}) — users actively checking evidence`,
        action: `Consider adding "${slug}" to featured collection or promo queue`,
        evidence: { proofEngagementScore: score, currentlyFeatured: false },
      });
    }
  }

  return results.slice(0, 3);
}

/**
 * Find tools with high install copies but low proof engagement.
 * @param {object} signals
 * @param {object} rollup
 * @returns {object[]}
 */
function findLowProofEngagementTools(signals, rollup = {}) {
  const results = [];
  const bySlug = rollup.bySlug || {};
  const engagement = signals.proofEngagementBySlug || {};

  for (const [slug, counts] of Object.entries(bySlug)) {
    const installs = counts.copy_install || 0;
    const proofScore = engagement[slug] || 0;

    if (installs > 5 && proofScore < 2) {
      results.push({
        priority: "medium",
        category: "improve-proof",
        slug,
        title: `Improve proof for ${slug}`,
        insight: `${installs} install copies but only ${proofScore} proof interactions`,
        action: `Review publicProof quality — add demo, benchmark, or better evidence links`,
        evidence: { installCopies: installs, proofEngagementScore: proofScore },
      });
    }
  }

  return results
    .sort((a, b) => b.evidence.installCopies - a.evidence.installCopies)
    .slice(0, 3);
}

/**
 * Find submissions with high friction scores.
 * @param {object} signals
 * @param {object} submissions
 * @returns {object[]}
 */
function findHighFrictionSubmissions(signals, submissions = {}) {
  const results = [];
  const subs = submissions.submissions || [];
  const friction = signals.submissionFrictionBySlug || {};

  for (const sub of subs) {
    const score = friction[sub.slug] || 0;
    if (score >= 3 && (sub.status === "pending" || sub.status === "needs-info")) {
      results.push({
        priority: "high",
        category: "stuck-submission",
        slug: sub.slug,
        title: `Review stuck submission: ${sub.slug}`,
        insight: `Friction score ${score} — status: ${sub.status}`,
        action: `Provide guidance to submitter or escalate for manual review`,
        evidence: { frictionScore: score, status: sub.status, submittedAt: sub.submittedAt },
      });
    }
  }

  return results
    .sort((a, b) => b.evidence.frictionScore - a.evidence.frictionScore)
    .slice(0, 3);
}

/**
 * Find experiments that have found a winner and are ready to graduate.
 * @param {object} experimentDecisions
 * @returns {object[]}
 */
function findReadyExperiments(experimentDecisions = {}) {
  const results = [];
  const evaluations = experimentDecisions.evaluations || [];

  for (const ev of evaluations) {
    if (ev.status === "winner-found") {
      results.push({
        priority: "medium",
        category: "experiment-graduation",
        slug: ev.experimentId,
        title: `Graduate experiment: ${ev.experimentId}`,
        insight: `Winner found — variant "${ev.winnerKey}" outperformed`,
        action: `Apply winning variant to overrides.json and conclude experiment`,
        evidence: {
          experimentId: ev.experimentId,
          winnerKey: ev.winnerKey,
          recommendation: ev.recommendation || "",
        },
      });
    }
  }

  return results.slice(0, 3);
}

/**
 * Analyze lint failure patterns from queue health data.
 * @param {object} queueHealth
 * @returns {{ warningsToElevate: object[], docsToRewrite: object[] }}
 */
function analyzeLintPatterns(queueHealth = {}) {
  const topFailures = queueHealth.topLintFailures || [];
  const warningsToElevate = [];
  const docsToRewrite = [];

  for (const failure of topFailures) {
    if (failure.count > 3) {
      warningsToElevate.push({
        warning: failure.reason,
        count: failure.count,
        suggestion: `Promote to error — ${failure.count} occurrences indicate systemic gap`,
      });
    }
  }

  // Cluster needs-info patterns — look for repeated lint warnings as signals
  // that the submission docs need rewriting
  const docTopics = {};
  for (const failure of topFailures) {
    const reason = failure.reason.toLowerCase();
    if (reason.includes("install")) docTopics["install command"] = (docTopics["install command"] || 0) + failure.count;
    if (reason.includes("quickstart")) docTopics["quickstart"] = (docTopics["quickstart"] || 0) + failure.count;
    if (reason.includes("proof")) docTopics["proof links"] = (docTopics["proof links"] || 0) + failure.count;
    if (reason.includes("pitch")) docTopics["pitch format"] = (docTopics["pitch format"] || 0) + failure.count;
  }

  for (const [topic, count] of Object.entries(docTopics)) {
    if (count > 3) {
      docsToRewrite.push({
        topic,
        occurrences: count,
        suggestion: `Rewrite submission guide section on "${topic}" — ${count} failures suggest unclear docs`,
      });
    }
  }

  return { warningsToElevate, docsToRewrite };
}

/**
 * Build lint-promotion recommendations from lint insights.
 * @param {{ warningsToElevate: object[], docsToRewrite: object[] }} lintInsights
 * @returns {object[]}
 */
function buildLintRecommendations(lintInsights) {
  const results = [];

  for (const item of (lintInsights.warningsToElevate || []).slice(0, 3)) {
    results.push({
      priority: "low",
      category: "lint-promotion",
      slug: item.warning,
      title: `Promote lint warning: "${item.warning}"`,
      insight: `Appears ${item.count} times — consistent failure pattern`,
      action: item.suggestion,
      evidence: { warning: item.warning, count: item.count },
    });
  }

  return results;
}

// ── Main Orchestrator ───────────────────────────────────────

/**
 * Build all recommendations from input data.
 * @param {object} inputs
 * @param {{ maxRecommendations?: number }} opts
 * @returns {object}
 */
export function buildRecommendations(inputs, opts = {}) {
  const { maxRecommendations = 20 } = opts;

  // Compute signals
  const trustByWeek = inputs.rollup?.metrics?.trustInteractionScoreByWeek || {};
  const proofEngagementBySlug = computeProofEngagementBySlug(inputs.rollup?.bySlug || {});
  const submissionFrictionBySlug = computeSubmissionFrictionBySlug(
    inputs.submissions?.submissions || [],
    inputs.lintReports || {},
    { now: opts.now },
  );

  const signals = { trustByWeek, proofEngagementBySlug, submissionFrictionBySlug };

  // Gather recommendations from all categories
  const recommendations = [];

  recommendations.push(...findHighTrustTools(signals, inputs.overrides || {}));
  recommendations.push(...findLowProofEngagementTools(signals, inputs.rollup || {}));
  recommendations.push(...findHighFrictionSubmissions(signals, inputs.submissions || {}));
  recommendations.push(...findReadyExperiments(inputs.experimentDecisions || {}));

  // Lint patterns analysis
  const lintInsights = analyzeLintPatterns(inputs.queueHealth || {});
  recommendations.push(...buildLintRecommendations(lintInsights));

  // Sort by priority (high first)
  const priorityOrder = { high: 0, medium: 1, low: 2 };
  recommendations.sort((a, b) => (priorityOrder[a.priority] ?? 9) - (priorityOrder[b.priority] ?? 9));

  return {
    generatedAt: new Date().toISOString(),
    signals,
    recommendations: recommendations.slice(0, maxRecommendations),
    guardrails: {
      dailyEventCaps: { max: 500 },
      suspiciousSources: [],
      multiEventCorroboration: true,
    },
    lintInsights,
  };
}

// ── Pipeline ────────────────────────────────────────────────

function loadJsonSafe(filePath) {
  try {
    return JSON.parse(fs.readFileSync(filePath, "utf8"));
  } catch {
    return null;
  }
}

/**
 * Generate recommendations from data files.
 * @param {object} opts
 */
export function genRecommendations(opts = {}) {
  const {
    rollupPath = path.join(ROOT, config.paths.dataDir, "telemetry", "rollup.json"),
    queueHealthPath = path.join(ROOT, config.paths.dataDir, "queue-health.json"),
    worthyPath = path.join(ROOT, config.paths.dataDir, "worthy.json"),
    overridesPath = path.join(ROOT, config.paths.dataDir, "overrides.json"),
    submissionsPath = path.join(ROOT, config.paths.dataDir, "submissions.json"),
    experimentsPath = path.join(ROOT, config.paths.dataDir, "experiments.json"),
    experimentDecisionsPath = path.join(ROOT, config.paths.dataDir, "experiment-decisions.json"),
    lintDir = path.join(ROOT, config.paths.dataDir, "lint-reports"),
    outputPath = path.join(ROOT, config.paths.dataDir, "recommendations.json"),
    dryRun = false,
  } = opts;

  console.log("Generating recommendations...");
  console.log(` Mode: ${dryRun ? "DRY RUN" : "LIVE"}`);

  // Load all inputs (fail-soft)
  const inputs = {
    rollup: loadJsonSafe(rollupPath),
    queueHealth: loadJsonSafe(queueHealthPath),
    worthy: loadJsonSafe(worthyPath),
    overrides: loadJsonSafe(overridesPath),
    submissions: loadJsonSafe(submissionsPath),
    experiments: loadJsonSafe(experimentsPath),
    experimentDecisions: loadJsonSafe(experimentDecisionsPath),
    lintReports: {},
  };

  // Load lint reports if directory exists
  try {
    if (fs.existsSync(lintDir)) {
      const files = fs.readdirSync(lintDir).filter((f) => f.endsWith(".json"));
      for (const file of files) {
        const slug = file.replace(/\.json$/, "");
        const report = loadJsonSafe(path.join(lintDir, file));
        if (report) inputs.lintReports[slug] = report;
      }
    }
  } catch { /* fail soft */ }

  const result = buildRecommendations(inputs);

  if (dryRun) {
    console.log(` [dry-run] Recommendation generation complete.`);
  } else {
    fs.writeFileSync(outputPath, JSON.stringify(result, null, 2) + "\n");
    console.log(` Wrote ${outputPath}`);
  }

  console.log(` Recommendations: ${result.recommendations.length}`);
  console.log(` Signals: ${Object.keys(result.signals.proofEngagementBySlug).length} tools with engagement`);
  console.log(` Lint insights: ${result.lintInsights.warningsToElevate.length} warnings to elevate`);

  return result;
}

// ── CLI ─────────────────────────────────────────────────────

if (isMain) {
  const dryRun = process.argv.includes("--dry-run");
  genRecommendations({ dryRun });
}
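A minimal usage sketch, not shipped in the package: it exercises the pure exports above on in-memory data, with no files read or written. The slugs, counts, dates, and the relative import path are invented for illustration; only the imported function names come from the file above.

// Illustrative only: in-memory inputs for the pure exports of gen-recommendations.mjs.
import {
  computeProofEngagementBySlug,
  computeSubmissionFrictionBySlug,
  buildRecommendations,
} from "./scripts/gen-recommendations.mjs";

const rollup = {
  bySlug: {
    "demo-tool": { click_evidence_link: 4, copy_proof_bullets: 3, copy_install: 9 },
  },
};
const submissions = {
  submissions: [
    { slug: "slow-tool", status: "needs-info", submittedAt: "2024-01-01T00:00:00Z" },
  ],
};

// ProofEngagement = click_evidence_link + copy_proof_bullets
console.log(computeProofEngagementBySlug(rollup.bySlug));
// → { "demo-tool": 7 }

// Friction = lint warnings (0) + needs-info penalty (2) + stuck penalty (1, older than 7 days)
console.log(computeSubmissionFrictionBySlug(submissions.submissions, {}, { now: new Date("2024-02-01") }));
// → { "slow-tool": 3 }

// Full advisory report from the same in-memory inputs.
const report = buildRecommendations({ rollup, submissions, overrides: {}, queueHealth: {} });
console.log(report.recommendations.map((r) => `${r.priority}: ${r.title}`));
// → [ "high: Re-feature demo-tool", "high: Review stuck submission: slow-tool" ]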
package/scripts/gen-telemetry-aggregate.mjs
@@ -0,0 +1,266 @@
#!/usr/bin/env node

/**
 * Telemetry Aggregator
 *
 * Reads raw event JSONL files and produces aggregated rollup + daily JSON.
 *
 * Usage:
 *   node scripts/gen-telemetry-aggregate.mjs [--dry-run]
 *
 * Reads:
 *   site/src/data/telemetry/events/*.jsonl
 *
 * Writes:
 *   site/src/data/telemetry/rollup.json
 *   site/src/data/telemetry/daily/YYYY-MM-DD.json
 */

import { readFileSync, readdirSync, writeFileSync, mkdirSync, existsSync } from "node:fs";
import { resolve, join } from "node:path";
import { getConfig, getRoot } from "./lib/config.mjs";

const ROOT = getRoot();
const config = getConfig();

// ── Valid event types ─────────────────────────────────────────

export const VALID_EVENT_TYPES = [
  "copy_proof_link", "copy_bundle", "copy_verify_cmd",
  "copy_install", "copy_proof_bullets", "copy_claim",
  "click_evidence_link", "click_receipt_link", "click_submit_link",
];

// ── Parsing ───────────────────────────────────────────────────

/**
 * Parse a JSONL string into an array of events.
 * Skips malformed lines silently.
 * @param {string} content
 * @returns {object[]}
 */
export function parseEventsFile(content) {
  if (!content || !content.trim()) return [];
  const events = [];
  for (const line of content.split("\n")) {
    const trimmed = line.trim();
    if (!trimmed) continue;
    try {
      const evt = JSON.parse(trimmed);
      if (evt && evt.type && evt.timestamp) {
        events.push(evt);
      }
    } catch {
      // skip malformed lines
    }
  }
  return events;
}

// ── Aggregation ───────────────────────────────────────────────

/**
 * Aggregate events into counts by type, slug, and week.
 * Includes anti-gaming guardrails: per-day caps and spike detection.
 * @param {object[]} events
 * @param {{ enableCaps?: boolean, dailyCapPerType?: number, spikeThreshold?: number }} opts
 * @returns {{ generatedAt: string, totalEvents: number, byType: object, bySlug: object, byWeek: object, metrics: object, guardrails: object }}
 */
export function aggregateEvents(events, opts = {}) {
  const { enableCaps = true, dailyCapPerType = 50, spikeThreshold = 300 } = opts;

  // Phase 1: Apply per-day caps
  const dailyTypeCounts = {};
  const cappedEvents = [];
  const guardrails = {
    totalEventsProcessed: events.length,
    eventsCapped: 0,
    suspiciousDays: [],
  };

  for (const evt of events) {
    if (!evt.timestamp || !evt.type) {
      cappedEvents.push(evt);
      continue;
    }

    const day = evt.timestamp.slice(0, 10);
    const key = `${day}:${evt.type}`;

    if (enableCaps) {
      const count = dailyTypeCounts[key] || 0;
      if (count >= dailyCapPerType) {
        guardrails.eventsCapped++;
        continue;
      }
      dailyTypeCounts[key] = count + 1;
    }

    cappedEvents.push(evt);
  }

  // Phase 2: Spike detection
  const dailyTotals = {};
  for (const evt of cappedEvents) {
    if (evt.timestamp) {
      const day = evt.timestamp.slice(0, 10);
      dailyTotals[day] = (dailyTotals[day] || 0) + 1;
    }
  }
  for (const [day, count] of Object.entries(dailyTotals)) {
    if (count > spikeThreshold) {
      guardrails.suspiciousDays.push({ day, count });
    }
  }

  // Phase 3: Standard aggregation on capped events
  const byType = {};
  const bySlug = {};
  const byWeek = {};

  for (const evt of cappedEvents) {
    // By type
    byType[evt.type] = (byType[evt.type] || 0) + 1;

    // By slug
    const slug = evt.payload?.slug;
    if (slug) {
      if (!bySlug[slug]) bySlug[slug] = {};
      bySlug[slug][evt.type] = (bySlug[slug][evt.type] || 0) + 1;
    }

    // By week
    const week = evt.payload?.week;
    if (week) {
      if (!byWeek[week]) byWeek[week] = {};
      byWeek[week][evt.type] = (byWeek[week][evt.type] || 0) + 1;
    }
  }

  const metrics = computeMetrics(byType, byWeek);

  return {
    generatedAt: new Date().toISOString(),
    totalEvents: cappedEvents.length,
    byType,
    bySlug,
    byWeek,
    metrics,
    guardrails,
  };
}

/**
 * Compute derived metrics from aggregated type counts.
 * @param {object} byType
 * @param {object} byWeek
 * @returns {object}
 */
export function computeMetrics(byType, byWeek = {}) {
  const copyProofLink = byType.copy_proof_link || 0;
  const copyBundle = byType.copy_bundle || 0;
  const copyVerifyCmd = byType.copy_verify_cmd || 0;
  const totalVerifyActions = copyProofLink + copyBundle + copyVerifyCmd;
  const verificationRate = totalVerifyActions > 0 ? copyBundle / totalVerifyActions : 0;

  const totalProofActions =
    (byType.copy_proof_bullets || 0) +
    (byType.copy_claim || 0) +
    (byType.click_evidence_link || 0);

  const submissionClicks = byType.click_submit_link || 0;

  // Trust Interaction Score per week
  const trustInteractionScoreByWeek = {};
  for (const [week, counts] of Object.entries(byWeek)) {
    trustInteractionScoreByWeek[week] =
      (counts.copy_bundle || 0) +
      (counts.copy_verify_cmd || 0) +
      (counts.click_receipt_link || 0);
  }

  return {
    verificationRate: Math.round(verificationRate * 10000) / 10000,
    totalVerifyActions,
    totalProofActions,
    submissionClicks,
    trustInteractionScoreByWeek,
  };
}

// ── Pipeline ──────────────────────────────────────────────────

/**
 * Read all JSONL event files, aggregate, write output.
 * @param {{ eventsDir?: string, outputPath?: string, dailyDir?: string, dryRun?: boolean }} opts
 */
export function genTelemetryAggregate(opts = {}) {
  const {
    eventsDir = join(ROOT, config.paths.dataDir, "telemetry", "events"),
    outputPath = join(ROOT, config.paths.dataDir, "telemetry", "rollup.json"),
    dailyDir = join(ROOT, config.paths.dataDir, "telemetry", "daily"),
    dryRun = false,
  } = opts;

  // Collect all events
  const allEvents = [];
  const eventsByDay = {};

  if (existsSync(eventsDir)) {
    const files = readdirSync(eventsDir).filter((f) => f.endsWith(".jsonl"));
    for (const file of files) {
      const content = readFileSync(join(eventsDir, file), "utf8");
      const events = parseEventsFile(content);
      allEvents.push(...events);
    }
  }

  // Group by day for daily rollups
  for (const evt of allEvents) {
    const day = evt.timestamp.slice(0, 10); // YYYY-MM-DD
    if (!eventsByDay[day]) eventsByDay[day] = [];
    eventsByDay[day].push(evt);
  }

  // Aggregate all
  const rollup = aggregateEvents(allEvents);

  if (dryRun) {
    console.log(` [dry-run] Telemetry aggregation complete.`);
    console.log(` Total events: ${allEvents.length}`);
    console.log(` Days with data: ${Object.keys(eventsByDay).length}`);
    return { rollup, dailyCount: Object.keys(eventsByDay).length };
  }

  // Write rollup
  writeFileSync(outputPath, JSON.stringify(rollup, null, 2) + "\n", "utf8");

  // Write daily rollups
  mkdirSync(dailyDir, { recursive: true });
  for (const [day, events] of Object.entries(eventsByDay)) {
    const daily = aggregateEvents(events);
    writeFileSync(
      join(dailyDir, `${day}.json`),
      JSON.stringify(daily, null, 2) + "\n",
      "utf8",
    );
  }

  return { rollup, dailyCount: Object.keys(eventsByDay).length };
}

// ── Entry point ───────────────────────────────────────────────

const isMain = process.argv[1] && resolve(process.argv[1]).endsWith("gen-telemetry-aggregate.mjs");
if (isMain) {
  const dryRun = process.argv.includes("--dry-run");
  console.log("Aggregating telemetry events...");
  if (dryRun) console.log(" Mode: DRY RUN");

  const result = genTelemetryAggregate({ dryRun });
  if (!dryRun) {
    console.log(` Total events: ${result.rollup.totalEvents}`);
    console.log(` Daily rollups: ${result.dailyCount}`);
    console.log(` Verification rate: ${(result.rollup.metrics.verificationRate * 100).toFixed(1)}%`);
  }
}
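A minimal usage sketch, not shipped in the package: it runs parseEventsFile and aggregateEvents on a hand-written JSONL fragment, without touching the filesystem. The event payloads, timestamps, and the relative import path are invented for illustration; only the imported function names come from the file above.

// Illustrative only: exercises the parse/aggregate path on in-memory JSONL.
import { parseEventsFile, aggregateEvents } from "./scripts/gen-telemetry-aggregate.mjs";

const jsonl = [
  '{"type":"copy_bundle","timestamp":"2024-03-04T10:00:00Z","payload":{"slug":"demo-tool","week":"2024-W10"}}',
  '{"type":"copy_verify_cmd","timestamp":"2024-03-04T10:05:00Z","payload":{"slug":"demo-tool","week":"2024-W10"}}',
  '{"type":"copy_proof_link","timestamp":"2024-03-04T10:06:00Z","payload":{"slug":"demo-tool"}}',
  "not json, skipped silently",
].join("\n");

const events = parseEventsFile(jsonl); // 3 valid events; the malformed line is dropped
const rollup = aggregateEvents(events); // defaults: 50 events per type per day, spike threshold 300

console.log(rollup.byType);
// → { copy_bundle: 1, copy_verify_cmd: 1, copy_proof_link: 1 }
console.log(rollup.metrics.verificationRate);
// → 0.3333 (copy_bundle divided by all verify-intent actions)
console.log(rollup.metrics.trustInteractionScoreByWeek);
// → { "2024-W10": 2 }
console.log(rollup.guardrails.eventsCapped);
// → 0 (well under the default per-day cap)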