@rarusoft/dendrite-wiki 0.1.0-alpha.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (74) hide show
  1. package/README.md +79 -0
  2. package/dist/api-extractor/extract.js +269 -0
  3. package/dist/api-extractor/language-extractor.js +15 -0
  4. package/dist/api-extractor/python-extractor.js +358 -0
  5. package/dist/api-extractor/render.js +195 -0
  6. package/dist/api-extractor/tree-sitter-extractor.js +1079 -0
  7. package/dist/api-extractor/types.js +11 -0
  8. package/dist/api-extractor/typescript-extractor.js +50 -0
  9. package/dist/api-extractor/walk.js +178 -0
  10. package/dist/api-reference.js +438 -0
  11. package/dist/benchmark-events.js +129 -0
  12. package/dist/benchmark.js +270 -0
  13. package/dist/binder-export.js +381 -0
  14. package/dist/canonical-target.js +168 -0
  15. package/dist/chart-insert.js +377 -0
  16. package/dist/chart-prompts.js +414 -0
  17. package/dist/context-cache.js +98 -0
  18. package/dist/contradicts-shipped-memory.js +232 -0
  19. package/dist/diff-context.js +142 -0
  20. package/dist/doctor.js +220 -0
  21. package/dist/generated-docs.js +219 -0
  22. package/dist/i18n.js +71 -0
  23. package/dist/index.js +49 -0
  24. package/dist/librarian.js +255 -0
  25. package/dist/maintenance-actions.js +244 -0
  26. package/dist/maintenance-inbox.js +842 -0
  27. package/dist/maintenance-runner.js +62 -0
  28. package/dist/page-drift.js +225 -0
  29. package/dist/page-inbox.js +168 -0
  30. package/dist/report-export.js +339 -0
  31. package/dist/review-bridge.js +1386 -0
  32. package/dist/search-index.js +199 -0
  33. package/dist/store.js +1617 -0
  34. package/dist/telemetry-defaults.js +44 -0
  35. package/dist/telemetry-report.js +263 -0
  36. package/dist/telemetry.js +544 -0
  37. package/dist/wiki-synthesis.js +901 -0
  38. package/package.json +35 -0
  39. package/src/api-extractor/extract.ts +333 -0
  40. package/src/api-extractor/language-extractor.ts +37 -0
  41. package/src/api-extractor/python-extractor.ts +380 -0
  42. package/src/api-extractor/render.ts +267 -0
  43. package/src/api-extractor/tree-sitter-extractor.ts +1210 -0
  44. package/src/api-extractor/types.ts +41 -0
  45. package/src/api-extractor/typescript-extractor.ts +56 -0
  46. package/src/api-extractor/walk.ts +209 -0
  47. package/src/api-reference.ts +552 -0
  48. package/src/benchmark-events.ts +216 -0
  49. package/src/benchmark.ts +376 -0
  50. package/src/binder-export.ts +437 -0
  51. package/src/canonical-target.ts +192 -0
  52. package/src/chart-insert.ts +478 -0
  53. package/src/chart-prompts.ts +417 -0
  54. package/src/context-cache.ts +129 -0
  55. package/src/contradicts-shipped-memory.ts +311 -0
  56. package/src/diff-context.ts +187 -0
  57. package/src/doctor.ts +260 -0
  58. package/src/generated-docs.ts +316 -0
  59. package/src/i18n.ts +106 -0
  60. package/src/index.ts +59 -0
  61. package/src/librarian.ts +331 -0
  62. package/src/maintenance-actions.ts +314 -0
  63. package/src/maintenance-inbox.ts +1132 -0
  64. package/src/maintenance-runner.ts +85 -0
  65. package/src/page-drift.ts +292 -0
  66. package/src/page-inbox.ts +254 -0
  67. package/src/report-export.ts +392 -0
  68. package/src/review-bridge.ts +1729 -0
  69. package/src/search-index.ts +266 -0
  70. package/src/store.ts +2171 -0
  71. package/src/telemetry-defaults.ts +50 -0
  72. package/src/telemetry-report.ts +365 -0
  73. package/src/telemetry.ts +757 -0
  74. package/src/wiki-synthesis.ts +1307 -0
@@ -0,0 +1,544 @@
1
+ /**
2
+ * Opt-in telemetry — local-first, explicitly-consented benchmark sharing.
3
+ *
4
+ * Telemetry is OFF by default. Setting `DENDRITE_WIKI_TELEMETRY_SHARING=opt-in` (or
5
+ * running `dendrite-wiki telemetry opt-in`) records explicit consent in
6
+ * `local-data/telemetry.json` — but consent alone does not send anything. The
7
+ * operator must additionally configure `DENDRITE_WIKI_TELEMETRY_TURSO_URL` and
8
+ * `_TOKEN` to point at a Turso libSQL database THEY own; only then does
9
+ * `dendrite-wiki telemetry upload` push a sanitized aggregate payload there.
10
+ *
11
+ * Sanitization is deliberate: page counts, lint summaries, and recall scores ship; raw
12
+ * page content, memory bodies, file paths, and project-log entries DO NOT. The audit log
13
+ * at `local-data/telemetry-upload-audit.json` records the latest send attempt so the operator can
14
+ * verify what left the machine. There is no Anthropic-managed backend in this milestone
15
+ * — the only destination is the operator's own database.
16
+ */
17
+ import { promises as fs } from 'node:fs';
18
+ import { randomUUID } from 'node:crypto';
19
+ import path from 'node:path';
20
+ import { TELEMETRY_DEFAULT_TABLE, TELEMETRY_DEFAULT_TOKEN, TELEMETRY_DEFAULT_URL } from './telemetry-defaults.js';
21
// All paths below are RELATIVE; resolveTelemetryPaths() joins them onto the project root.
// The data directory is read from the environment once, at module load — changing
// DENDRITE_WIKI_DATA_DIR after import has no effect.
const dataDirRelativePath = process.env.DENDRITE_WIKI_DATA_DIR ?? 'local-data';
// Consent record (sharing mode + installation/project UUIDs).
const telemetryConfigRelativePath = path.join(dataDirRelativePath, 'telemetry.json');
// Audit of the latest upload attempt / latest success, written by finalizeUploadAttempt().
const telemetryUploadAuditRelativePath = path.join(dataDirRelativePath, 'telemetry-upload-audit.json');
// Append-only local benchmark event log (never uploaded; only aggregates leave the machine).
const benchmarkEventLogRelativePath = path.join(dataDirRelativePath, 'benchmark-events.jsonl');
// Publicly served artifacts consumed by the browser UI.
const benchmarkEventSummaryRelativePath = path.join('docs', 'public', 'dendrite-benchmark-events-summary.json');
const telemetryStatusArtifactRelativePath = path.join('docs', 'public', 'dendrite-telemetry-status.json');
// Default auto-upload throttle window, in hours; see resolveAutoUploadThrottleHours().
const DEFAULT_AUTO_UPLOAD_THROTTLE_HOURS = 24;
28
/**
 * Resolve the throttle window (in hours) for the auto-upload path.
 *
 * Operators control this with two env vars:
 * - `DENDRITE_WIKI_TELEMETRY_AUTO_UPLOAD=off` (or false/0/no/disable/disabled)
 *   disables auto-upload entirely while keeping consent on; manual
 *   `dendrite-wiki telemetry upload` and the browser button still work.
 * - `DENDRITE_WIKI_TELEMETRY_AUTO_UPLOAD_HOURS=<positive integer>` overrides the
 *   default window.
 *
 * @returns {number|null} hours between auto-upload attempts, or null when the
 *   auto path is disabled. Invalid/empty overrides fall back to the default.
 */
function resolveAutoUploadThrottleHours() {
    const disableValues = new Set(['off', 'false', '0', 'no', 'disable', 'disabled']);
    const autoSetting = (process.env.DENDRITE_WIKI_TELEMETRY_AUTO_UPLOAD ?? '').trim().toLowerCase();
    if (disableValues.has(autoSetting)) {
        return null;
    }
    const hoursSetting = (process.env.DENDRITE_WIKI_TELEMETRY_AUTO_UPLOAD_HOURS ?? '').trim();
    if (hoursSetting === '') {
        return DEFAULT_AUTO_UPLOAD_THROTTLE_HOURS;
    }
    const hours = Number.parseInt(hoursSetting, 10);
    if (!Number.isFinite(hours) || hours < 1) {
        return DEFAULT_AUTO_UPLOAD_THROTTLE_HOURS;
    }
    // Hard cap at 30 days to avoid silent year-long throttles from a typo.
    return Math.min(hours, 24 * 30);
}
48
/**
 * T11: best-effort auto-upload at session start. Invoked from src/index.ts after the
 * `session_started` benchmark event; runs in the background (never awaited on the
 * server boot path). It silently declines to fire when any gate fails:
 *
 * - no recorded opt-in consent
 * - `DENDRITE_WIKI_TELEMETRY_AUTO_UPLOAD=off`
 * - no resolvable upload destination (env vars unset AND baked defaults empty)
 * - the previous attempt landed inside the throttle window
 *
 * When every gate passes it calls uploadTelemetry() exactly once — the user never
 * has to click anything after the original opt-in.
 *
 * @param {{root?: string, fetchImpl?: typeof fetch, packageVersion?: string|null}} options
 * @returns {Promise<object>} `{ fired, reason, ... }` describing what happened; never throws.
 */
export async function maybeAutoUploadTelemetry(options = {}) {
    const projectRoot = path.resolve(options.root ?? process.cwd());
    try {
        // Gate 1: explicit consent. Read errors count as "no consent".
        const consent = await readTelemetryConfig(projectRoot).catch(() => null);
        if (consent?.sharingMode !== 'opt-in') {
            return { fired: false, reason: 'no-consent' };
        }
        // Gate 2: operator may disable the auto path outright.
        const windowHours = resolveAutoUploadThrottleHours();
        if (windowHours === null) {
            return { fired: false, reason: 'auto-disabled' };
        }
        // Gate 3: somewhere to send the payload.
        const uploadTarget = resolveLibsqlUploadTarget();
        if (!uploadTarget.configured) {
            return { fired: false, reason: 'no-destination' };
        }
        // Gate 4: throttle on the timestamp of the previous attempt (success OR failure).
        const { uploadAuditPath } = resolveTelemetryPaths(projectRoot);
        const auditLog = await readTelemetryUploadAudit(uploadAuditPath).catch(() => null);
        const previousAttemptIso = auditLog?.lastAttempt?.attemptedAt ?? null;
        let elapsedHours = null;
        if (previousAttemptIso) {
            elapsedHours = (Date.now() - new Date(previousAttemptIso).getTime()) / 3600000;
        }
        if (elapsedHours !== null && elapsedHours < windowHours) {
            const rounded = Math.round(elapsedHours * 10) / 10;
            return {
                fired: false,
                reason: 'throttled',
                hoursSinceLastAttempt: rounded,
                detail: `Last attempt ${rounded}h ago, throttle window is ${windowHours}h.`
            };
        }
        // All gates passed — fire the real upload once.
        const outcome = await uploadTelemetry({ root: projectRoot, fetchImpl: options.fetchImpl, packageVersion: options.packageVersion });
        return {
            fired: true,
            reason: outcome.ok ? 'uploaded' : 'error',
            detail: outcome.message
        };
    }
    catch (error) {
        // Auto-upload must never crash the session-start path.
        return {
            fired: false,
            reason: 'error',
            detail: error instanceof Error ? error.message : String(error)
        };
    }
}
105
/**
 * Resolve all telemetry-related file locations under a project root.
 *
 * @param {string} [root=process.cwd()] project root (relative roots are resolved)
 * @returns {{root: string, configPath: string, statusArtifactPath: string,
 *   uploadAuditPath: string, benchmarkEventLogPath: string,
 *   benchmarkEventSummaryPath: string}} absolute paths.
 */
export function resolveTelemetryPaths(root = process.cwd()) {
    const base = path.resolve(root);
    const under = (relativePath) => path.join(base, relativePath);
    return {
        root: base,
        configPath: under(telemetryConfigRelativePath),
        statusArtifactPath: under(telemetryStatusArtifactRelativePath),
        uploadAuditPath: under(telemetryUploadAuditRelativePath),
        benchmarkEventLogPath: under(benchmarkEventLogRelativePath),
        benchmarkEventSummaryPath: under(benchmarkEventSummaryRelativePath)
    };
}
116
/**
 * Read and validate the on-disk telemetry consent record.
 *
 * @param {string} [root=process.cwd()] project root
 * @returns {Promise<object|null>} the validated config, or null when no config
 *   file exists yet.
 * @throws {Error} when the file exists but has an unsupported schema, an invalid
 *   sharing mode, or is missing updatedAt/installationId/projectId. (JSON parse
 *   errors also propagate.)
 */
export async function readTelemetryConfig(root = process.cwd()) {
    const { configPath } = resolveTelemetryPaths(root);
    let content;
    try {
        content = await fs.readFile(configPath, 'utf8');
    }
    catch (error) {
        // No file means telemetry has never been configured — not an error.
        if (error.code === 'ENOENT') {
            return null;
        }
        throw error;
    }
    const parsed = JSON.parse(content);
    // Error messages reference the config by a forward-slash relative path.
    const describePath = () => toPortablePath(path.relative(root, configPath));
    const isNonEmptyString = (value) => typeof value === 'string' && value.length > 0;
    if (parsed.schemaVersion !== 1) {
        throw new Error(`Unsupported telemetry config schema in ${describePath()}.`);
    }
    if (parsed.sharingMode !== 'off' && parsed.sharingMode !== 'opt-in') {
        throw new Error(`Invalid telemetry sharing mode in ${describePath()}.`);
    }
    if (!isNonEmptyString(parsed.updatedAt)) {
        throw new Error(`Telemetry config in ${describePath()} is missing updatedAt.`);
    }
    if (!isNonEmptyString(parsed.installationId)) {
        throw new Error(`Telemetry config in ${describePath()} is missing installationId.`);
    }
    if (!isNonEmptyString(parsed.projectId)) {
        throw new Error(`Telemetry config in ${describePath()} is missing projectId.`);
    }
    // Return a normalized copy rather than the raw parsed object.
    return {
        schemaVersion: 1,
        sharingMode: parsed.sharingMode,
        updatedAt: parsed.updatedAt,
        installationId: parsed.installationId,
        projectId: parsed.projectId
    };
}
151
/**
 * Record the operator's sharing decision ('off' | 'opt-in') on disk, preserving
 * the existing installation/project UUIDs when present, then refresh the public
 * status artifact.
 *
 * @param {'off'|'opt-in'} sharingMode the new consent state
 * @param {string} [root=process.cwd()] project root
 * @returns {Promise<object>} the freshly written telemetry status artifact.
 */
export async function setTelemetrySharingMode(sharingMode, root = process.cwd()) {
    let existingConfig = null;
    try {
        existingConfig = await readTelemetryConfig(root);
    }
    catch (error) {
        // A config from before the UUID fields existed is treated as absent so
        // opt-in/opt-out still works; every other failure is real and propagates.
        const isLegacyConfig = error instanceof Error && /missing installationId|missing projectId/.test(error.message);
        if (!isLegacyConfig) {
            throw error;
        }
    }
    const { configPath } = resolveTelemetryPaths(root);
    const nextConfig = {
        schemaVersion: 1,
        sharingMode,
        updatedAt: new Date().toISOString(),
        // Stable identifiers survive mode flips; minted only on first write.
        installationId: existingConfig?.installationId ?? randomUUID(),
        projectId: existingConfig?.projectId ?? randomUUID()
    };
    await fs.mkdir(path.dirname(configPath), { recursive: true });
    await fs.writeFile(configPath, `${JSON.stringify(nextConfig, null, 2)}\n`, 'utf8');
    return writeTelemetryStatusArtifact(root);
}
170
/**
 * Rebuild the public telemetry status artifact and write it to
 * docs/public/dendrite-telemetry-status.json.
 *
 * @param {string} [root=process.cwd()] project root
 * @returns {Promise<object>} the artifact that was written.
 */
export async function writeTelemetryStatusArtifact(root = process.cwd()) {
    const { statusArtifactPath } = resolveTelemetryPaths(root);
    const artifact = await buildTelemetryStatusArtifact(root);
    await fs.mkdir(path.dirname(statusArtifactPath), { recursive: true });
    const serialized = `${JSON.stringify(artifact, null, 2)}\n`;
    await fs.writeFile(statusArtifactPath, serialized, 'utf8');
    return artifact;
}
177
/**
 * Build the sanitized summary payload and POST it to the configured Turso libSQL
 * destination (the /v2/pipeline HTTP endpoint).
 *
 * Flow: re-check consent from disk, verify a destination exists, then attempt the
 * POST at most twice — retrying only on 5xx responses and thrown network errors.
 * Every terminal outcome (success, skip, error) funnels through
 * finalizeUploadAttempt() so the audit file and status artifact always reflect
 * the most recent attempt.
 *
 * @param {{root?: string, fetchImpl?: typeof fetch, packageVersion?: string|null}} options
 *   `fetchImpl` lets tests stub the network; `packageVersion` overrides package.json.
 * @returns {Promise<object>} result from finalizeUploadAttempt ({ ok, message, attempt, ... }).
 */
export async function uploadTelemetry(options = {}) {
    const root = path.resolve(options.root ?? process.cwd());
    const fetchImpl = options.fetchImpl ?? fetch;
    const telemetryConfig = await readTelemetryConfig(root).catch((error) => {
        // A config from before the UUID fields is treated as "no consent recorded";
        // any other read/parse failure is real and propagates.
        if (error instanceof Error && /missing installationId|missing projectId/.test(error.message)) {
            return null;
        }
        throw error;
    });
    const target = resolveLibsqlUploadTarget();
    const packageVersion = options.packageVersion ?? (await readPackageVersion(root));
    // Consent gate: anything but explicit opt-in is an audited skip, not an error.
    if (telemetryConfig?.sharingMode !== 'opt-in') {
        return finalizeUploadAttempt(root, target.destination, {
            attemptedAt: new Date().toISOString(),
            status: 'skipped',
            destination: target.destination,
            reason: 'Telemetry sharing is not enabled. Run dendrite-wiki telemetry opt-in first.',
            httpStatus: null,
            responseBody: null,
            payload: null
        });
    }
    // Destination gate: consent without URL + token is also an audited skip.
    if (!target.configured || !target.destination || !target.apiKey) {
        return finalizeUploadAttempt(root, target.destination, {
            attemptedAt: new Date().toISOString(),
            status: 'skipped',
            destination: target.destination,
            reason: 'Turso libSQL upload is not configured. Set DENDRITE_WIKI_TELEMETRY_TURSO_URL (e.g. https://<db>-<org>.turso.io) and DENDRITE_WIKI_TELEMETRY_TURSO_TOKEN (auth token).',
            httpStatus: null,
            responseBody: null,
            payload: null
        });
    }
    const payload = await buildTelemetryUploadPayload(root, telemetryConfig, packageVersion);
    const requestBody = buildLibsqlInsertRequest(target.table, payload);
    let attempt = 0;
    let lastError = null;
    // At most two attempts; only 5xx and network-level failures are retried.
    while (attempt < 2) {
        attempt += 1;
        try {
            const response = await fetchImpl(target.destination, {
                method: 'POST',
                headers: {
                    'content-type': 'application/json',
                    authorization: `Bearer ${target.apiKey}`
                },
                body: JSON.stringify(requestBody)
            });
            const responseBody = await response.text();
            if (response.ok) {
                // libSQL returns 200 with a results payload even on per-statement errors. Inspect
                // the first response.results[].type — if it's 'error', treat the whole pipeline as
                // failed so the audit reflects reality (the row didn't actually land).
                const pipelineError = parseLibsqlPipelineError(responseBody);
                if (pipelineError) {
                    lastError = {
                        attemptedAt: new Date().toISOString(),
                        status: 'error',
                        destination: target.destination,
                        reason: `Turso libSQL pipeline reported error: ${pipelineError}`,
                        httpStatus: response.status,
                        responseBody: responseBody.length > 0 ? responseBody : null,
                        payload
                    };
                    // Per-statement errors are deterministic (e.g. table missing, schema mismatch)
                    // so retrying won't help — break out.
                    break;
                }
                // Row landed: record success (also updates lastSuccess in the audit) and return.
                return finalizeUploadAttempt(root, target.destination, {
                    attemptedAt: new Date().toISOString(),
                    status: 'success',
                    destination: target.destination,
                    reason: null,
                    httpStatus: response.status,
                    responseBody: responseBody.length > 0 ? responseBody : null,
                    payload
                });
            }
            // Non-2xx HTTP status: remember it; 4xx (bad token/URL) is not retryable.
            lastError = {
                attemptedAt: new Date().toISOString(),
                status: 'error',
                destination: target.destination,
                reason: `Turso libSQL upload failed with HTTP ${response.status}.`,
                httpStatus: response.status,
                responseBody: responseBody.length > 0 ? responseBody : null,
                payload
            };
            if (response.status < 500) {
                break;
            }
        }
        catch (error) {
            // Network-level failure (DNS, TLS, abort, ...) — loop for one retry if any remain.
            lastError = {
                attemptedAt: new Date().toISOString(),
                status: 'error',
                destination: target.destination,
                reason: error instanceof Error ? error.message : String(error),
                httpStatus: null,
                responseBody: null,
                payload
            };
        }
    }
    // Both attempts exhausted (or a non-retryable failure): audit the last error.
    return finalizeUploadAttempt(root, target.destination, lastError ?? {
        attemptedAt: new Date().toISOString(),
        status: 'error',
        destination: target.destination,
        reason: 'Turso libSQL upload failed.',
        httpStatus: null,
        responseBody: null,
        payload
    });
}
290
// libSQL's HTTP API takes a "pipeline" of statements: one INSERT with named args,
// then a `close` request so the connection is released cleanly. The schema is
// documented in docs/wiki/privacy-telemetry-disclosure.md with the operator setup
// steps. The table name is interpolated into the SQL (named args cannot bind
// identifiers); it comes from operator configuration, not untrusted input.
function buildLibsqlInsertRequest(table, payload) {
    const sql = `INSERT INTO ${table} (installation_id, project_id, package_version, event, timestamp, sharing_mode, client_profiles, metrics) VALUES (:installation_id, :project_id, :package_version, :event, :timestamp, :sharing_mode, :client_profiles, :metrics)`;
    // Encode each bound value as libSQL's typed-value shape: NULL or text.
    const bind = (name, value) => {
        if (value === null) {
            return { name, value: { type: 'null' } };
        }
        return { name, value: { type: 'text', value } };
    };
    const named_args = [
        bind('installation_id', payload.installationId),
        bind('project_id', payload.projectId),
        bind('package_version', payload.packageVersion),
        bind('event', payload.event),
        bind('timestamp', payload.timestamp),
        bind('sharing_mode', payload.sharingMode),
        bind('client_profiles', JSON.stringify(payload.clientProfiles)),
        bind('metrics', JSON.stringify(payload.metrics))
    ];
    return {
        requests: [
            { type: 'execute', stmt: { sql, named_args } },
            { type: 'close' }
        ]
    };
}
320
/**
 * Extract a per-statement error message from a libSQL pipeline response body.
 *
 * @param {string} responseBody raw HTTP response text
 * @returns {string|null} the first statement error's message ('unknown pipeline
 *   error' when the error carries no message), or null when the body is not
 *   JSON or contains no error result.
 */
function parseLibsqlPipelineError(responseBody) {
    let parsed;
    try {
        parsed = JSON.parse(responseBody);
    }
    catch {
        // Not JSON — nothing to report.
        return null;
    }
    const results = Array.isArray(parsed?.results) ? parsed.results : [];
    for (const result of results) {
        if (result?.type === 'error') {
            return result.error?.message ?? 'unknown pipeline error';
        }
    }
    return null;
}
332
/**
 * Assemble the telemetry status artifact (published to docs/public/) describing
 * consent state, file locations, upload audit summary, and local benchmark
 * event counts.
 *
 * @param {string} root project root
 * @returns {Promise<object>} the artifact object (not written here; see
 *   writeTelemetryStatusArtifact).
 * @throws propagates readTelemetryConfig validation errors.
 */
async function buildTelemetryStatusArtifact(root) {
    const paths = resolveTelemetryPaths(root);
    const config = await readTelemetryConfig(root);
    const benchmarkEventSummary = await readBenchmarkEventSummary(paths.benchmarkEventSummaryPath);
    const uploadAudit = await readTelemetryUploadAudit(paths.uploadAuditPath);
    const uploadTarget = resolveLibsqlUploadTarget();
    // Fix: guard `recentEvents` as well — `benchmarkEventSummary?.` only protects
    // against a missing summary; a summary written without the array (older or
    // hand-edited file) previously threw on `.at(-1)` here.
    const latestEventAt = benchmarkEventSummary?.recentEvents?.at(-1)?.timestamp ?? null;
    const sharingMode = config?.sharingMode ?? 'off';
    const notes = buildTelemetryNotes(sharingMode, benchmarkEventSummary?.eventCount ?? 0, uploadTarget.configured, uploadAudit?.lastAttempt ?? null);
    return {
        schemaVersion: 1,
        generatedAt: new Date().toISOString(),
        sharingMode,
        sharingEnabled: sharingMode === 'opt-in',
        consent: {
            // Consent is "explicit" whenever any config record exists (either mode).
            isExplicit: config !== null,
            updatedAt: config?.updatedAt ?? null
        },
        paths: {
            configPath: toPortablePath(path.relative(paths.root, paths.configPath)),
            statusArtifactPath: toPortablePath(path.relative(paths.root, paths.statusArtifactPath)),
            uploadAuditPath: toPortablePath(path.relative(paths.root, paths.uploadAuditPath)),
            benchmarkEventLogPath: toPortablePath(path.relative(paths.root, paths.benchmarkEventLogPath)),
            benchmarkEventSummaryPath: toPortablePath(path.relative(paths.root, paths.benchmarkEventSummaryPath))
        },
        remoteUpload: {
            configured: uploadTarget.configured,
            destination: uploadTarget.destination,
            auditPath: toPortablePath(path.relative(paths.root, paths.uploadAuditPath)),
            lastAttemptAt: uploadAudit?.lastAttempt?.attemptedAt ?? null,
            lastAttemptStatus: uploadAudit?.lastAttempt?.status ?? null,
            lastSuccessAt: uploadAudit?.lastSuccess?.attemptedAt ?? null,
            lastError: uploadAudit?.lastAttempt?.status === 'error' ? uploadAudit.lastAttempt.reason : null,
            // Prefer the last successful payload; fall back to the last attempted one.
            lastPayloadPreview: uploadAudit?.lastSuccess?.payload ?? uploadAudit?.lastAttempt?.payload ?? null
        },
        benchmarkEvents: {
            eventCount: benchmarkEventSummary?.eventCount ?? 0,
            latestEventAt,
            byType: benchmarkEventSummary?.byType ?? createEmptyEventCounts()
        },
        notes
    };
}
375
/**
 * Load the benchmark event summary JSON from disk.
 *
 * @param {string} summaryPath absolute path to the summary artifact
 * @returns {Promise<object|null>} parsed summary, or null when the file does
 *   not exist. Other I/O errors and JSON parse errors propagate.
 */
async function readBenchmarkEventSummary(summaryPath) {
    let raw;
    try {
        raw = await fs.readFile(summaryPath, 'utf8');
    }
    catch (error) {
        // Missing summary simply means no events have been summarized yet.
        if (error.code === 'ENOENT') {
            return null;
        }
        throw error;
    }
    return JSON.parse(raw);
}
387
/**
 * Load the telemetry upload audit record from disk.
 *
 * @param {string} auditPath absolute path to the audit file
 * @returns {Promise<object|null>} parsed audit, or null when no upload has ever
 *   been attempted (file absent). Other I/O and parse errors propagate.
 */
async function readTelemetryUploadAudit(auditPath) {
    let raw;
    try {
        raw = await fs.readFile(auditPath, 'utf8');
    }
    catch (error) {
        // No audit file yet — nothing has been attempted.
        if (error.code === 'ENOENT') {
            return null;
        }
        throw error;
    }
    return JSON.parse(raw);
}
399
/**
 * Persist the upload audit record, creating the data directory if needed.
 *
 * @param {string} root project root
 * @param {object} audit the audit object to serialize
 */
async function writeTelemetryUploadAudit(root, audit) {
    const { uploadAuditPath } = resolveTelemetryPaths(root);
    const auditDirectory = path.dirname(uploadAuditPath);
    await fs.mkdir(auditDirectory, { recursive: true });
    const serialized = `${JSON.stringify(audit, null, 2)}\n`;
    await fs.writeFile(uploadAuditPath, serialized, 'utf8');
}
404
/**
 * Record the outcome of an upload attempt: update the audit file (preserving the
 * previous lastSuccess unless this attempt succeeded), regenerate the public
 * status artifact, and build the caller-facing result.
 *
 * @param {string} root project root
 * @param {string|null} destination resolved upload URL (may be null when unconfigured)
 * @param {object} attempt attempt record ({ attemptedAt, status, reason, ... })
 * @returns {Promise<object>} `{ ok, message, auditPath, destination, attempt, status }`.
 */
async function finalizeUploadAttempt(root, destination, attempt) {
    const { uploadAuditPath } = resolveTelemetryPaths(root);
    const previousAudit = await readTelemetryUploadAudit(uploadAuditPath);
    const succeeded = attempt.status === 'success';
    const audit = {
        schemaVersion: 1,
        updatedAt: attempt.attemptedAt,
        destination,
        lastAttempt: attempt,
        // Only a success replaces lastSuccess; failures keep the prior one visible.
        lastSuccess: succeeded ? attempt : previousAudit?.lastSuccess ?? null
    };
    await writeTelemetryUploadAudit(root, audit);
    const status = await writeTelemetryStatusArtifact(root);
    let message = attempt.reason;
    if (message == null) {
        message = succeeded ? 'Telemetry upload completed.' : 'Telemetry upload skipped.';
    }
    return {
        ok: succeeded,
        message,
        auditPath: status.paths.uploadAuditPath,
        destination,
        attempt,
        status
    };
}
424
/**
 * T12: build (but never send) the exact payload that `uploadTelemetry()` would
 * post next. This backs the browser's "What will be sent" preview panel so users
 * can inspect what would leave their machine before clicking the manual Upload
 * button. Returns null until a consent record exists — the preview is only
 * meaningful once the installationId/projectId UUIDs have been minted.
 *
 * @param {{root?: string, packageVersion?: string|null}} options
 * @returns {Promise<object|null>} the would-be payload, or null without consent.
 */
export async function previewTelemetryUploadPayload(options = {}) {
    const projectRoot = path.resolve(options.root ?? process.cwd());
    // Read errors are treated the same as "no config yet".
    const consent = await readTelemetryConfig(projectRoot).catch(() => null);
    if (consent === null) {
        return null;
    }
    const version = options.packageVersion ?? (await readPackageVersion(projectRoot));
    return buildTelemetryUploadPayload(projectRoot, consent, version);
}
440
/**
 * Build the sanitized upload payload: only aggregate counts and scores from the
 * local benchmark summary — never page content, memory bodies, or file paths.
 *
 * @param {string} root project root
 * @param {object} config validated telemetry config (supplies the UUIDs)
 * @param {string|null} packageVersion version string or null when unknown
 * @returns {Promise<object>} the payload uploadTelemetry() would send.
 */
async function buildTelemetryUploadPayload(root, config, packageVersion) {
    const benchmarkEventSummary = await readBenchmarkEventSummary(resolveTelemetryPaths(root).benchmarkEventSummaryPath);
    // Fix: optional-chain the nested groups (`usage`, `orientation`, `maintenance`)
    // too. `benchmarkEventSummary?.usage.x` only guards a missing summary — a summary
    // written without one of those groups previously threw here, even though the
    // `??` fallbacks show missing data was always meant to be tolerated.
    return {
        schemaVersion: 1,
        installationId: config.installationId,
        projectId: config.projectId,
        packageVersion,
        event: 'telemetry_summary',
        timestamp: new Date().toISOString(),
        sharingMode: 'opt-in',
        clientProfiles: readClientProfilesFromEnv(),
        metrics: {
            eventCount: benchmarkEventSummary?.eventCount ?? 0,
            sessionStartedCount: benchmarkEventSummary?.usage?.sessionStartedCount ?? 0,
            contextRequestCount: benchmarkEventSummary?.usage?.contextRequestCount ?? 0,
            wikiUpdateCount: benchmarkEventSummary?.usage?.wikiUpdateCount ?? 0,
            maintenanceStateChangeCount: benchmarkEventSummary?.usage?.maintenanceStateChangeCount ?? 0,
            sessionSnapshotCount: benchmarkEventSummary?.usage?.sessionSnapshotCount ?? 0,
            latestContextPageCount: benchmarkEventSummary?.orientation?.latestContextPageCount ?? null,
            latestContextOmittedPageCount: benchmarkEventSummary?.orientation?.latestContextOmittedPageCount ?? null,
            latestOpenQuestionCount: benchmarkEventSummary?.orientation?.latestOpenQuestionCount ?? null,
            acceptedProposalCount: benchmarkEventSummary?.maintenance?.acceptedProposalCount ?? 0,
            latestLintFindingCount: benchmarkEventSummary?.maintenance?.latestLintFindingCount ?? null,
            latestProposalCount: benchmarkEventSummary?.maintenance?.latestProposalCount ?? null
        }
    };
}
467
/**
 * Resolve where (if anywhere) telemetry uploads should go.
 *
 * Turso/libSQL HTTP API:
 * - Base URL: the database host (e.g. https://my-db-myorg.turso.io); the
 *   endpoint becomes <base>/v2/pipeline.
 * - Token: an auth token from `turso db tokens create <db>` or the dashboard.
 * - Table: the INSERT target (defaults to benchmark_events).
 *
 * Resolution order (Benchmark Telemetry Database Roadmap T2):
 * 1. Env vars (BYO destination — operator-owned Turso DB, wins over baked defaults)
 * 2. Build-time baked defaults from telemetry-defaults.ts (written at publish
 *    time only — empty in source)
 * 3. Both empty → upload returns `skipped` with a clear audit entry
 *
 * @returns {{configured: boolean, destination: string|null, apiKey: string|null, table: string}}
 */
function resolveLibsqlUploadTarget() {
    const envUrl = process.env.DENDRITE_WIKI_TELEMETRY_TURSO_URL?.trim() ?? '';
    const envToken = process.env.DENDRITE_WIKI_TELEMETRY_TURSO_TOKEN?.trim() ?? '';
    const envTable = process.env.DENDRITE_WIKI_TELEMETRY_TURSO_TABLE?.trim() ?? '';
    const baseUrl = envUrl || TELEMETRY_DEFAULT_URL.trim();
    const apiKey = envToken || TELEMETRY_DEFAULT_TOKEN.trim();
    const table = envTable || TELEMETRY_DEFAULT_TABLE.trim() || 'benchmark_events';
    // Fix: strip ALL trailing slashes (was /\/$/ — a URL configured as
    // "https://db.turso.io//" previously produced a malformed "...io//v2/pipeline").
    const destination = baseUrl ? `${baseUrl.replace(/\/+$/, '')}/v2/pipeline` : null;
    if (!baseUrl || !apiKey) {
        return { configured: false, destination, apiKey: apiKey || null, table };
    }
    return { configured: true, destination, apiKey, table };
}
491
/**
 * Build a zeroed per-event-type counter object, used when no benchmark event
 * summary exists yet.
 *
 * @returns {Record<string, number>} all known event types mapped to 0.
 */
function createEmptyEventCounts() {
    const counts = {};
    const eventTypes = [
        'session_started',
        'context_requested',
        'wiki_updated',
        'maintenance_state_changed',
        'session_snapshot'
    ];
    for (const eventType of eventTypes) {
        counts[eventType] = 0;
    }
    return counts;
}
500
/**
 * Compose the human-readable notes shown on the telemetry status page.
 *
 * @param {'off'|'opt-in'} sharingMode current consent state
 * @param {number} eventCount captured local benchmark event count
 * @param {boolean} uploadConfigured whether an upload destination is resolvable
 * @param {object|null} lastAttempt most recent upload attempt record, if any
 * @returns {string[]} two or three note sentences.
 */
function buildTelemetryNotes(sharingMode, eventCount, uploadConfigured, lastAttempt) {
    const notes = [];
    notes.push(`Automatic local benchmark events remain enabled and currently include ${eventCount} captured events.`);
    if (sharingMode !== 'opt-in') {
        notes.push('Telemetry sharing is off. Local benchmark artifacts continue to work without sending data anywhere.');
    }
    else if (uploadConfigured) {
        notes.push('Telemetry sharing consent is recorded locally and the uploader can send the sanitized summary payload when you run dendrite-wiki telemetry upload.');
    }
    else {
        notes.push('Telemetry sharing consent is recorded locally, but no Turso libSQL upload destination is configured yet. Set DENDRITE_WIKI_TELEMETRY_TURSO_URL and DENDRITE_WIKI_TELEMETRY_TURSO_TOKEN to enable uploads.');
    }
    switch (lastAttempt?.status) {
        case 'success':
            notes.push('The last telemetry upload completed successfully and the sanitized payload preview is available on this page.');
            break;
        case 'error':
            notes.push(`The last telemetry upload failed: ${lastAttempt.reason ?? 'unknown error'}`);
            break;
        default:
            // No attempt yet, or a skipped attempt — nothing extra to report.
            break;
    }
    return notes;
}
518
/**
 * Parse the comma-separated DENDRITE_WIKI_TELEMETRY_CLIENT_PROFILES env var into
 * a list of non-empty, trimmed profile names.
 *
 * @returns {string[]} profiles, or [] when the variable is unset/blank.
 */
function readClientProfilesFromEnv() {
    const raw = process.env.DENDRITE_WIKI_TELEMETRY_CLIENT_PROFILES?.trim();
    if (!raw) {
        return [];
    }
    const profiles = [];
    for (const entry of raw.split(',')) {
        const trimmed = entry.trim();
        if (trimmed.length > 0) {
            profiles.push(trimmed);
        }
    }
    return profiles;
}
528
/**
 * Read the `version` string from <root>/package.json.
 *
 * @param {string} root project root
 * @returns {Promise<string|null>} the version, or null when the file is absent
 *   or the version field is not a string. Parse/other I/O errors propagate.
 */
async function readPackageVersion(root) {
    const packageJsonPath = path.join(root, 'package.json');
    let raw;
    try {
        raw = await fs.readFile(packageJsonPath, 'utf8');
    }
    catch (error) {
        // No package.json at the root — version simply unknown.
        if (error.code === 'ENOENT') {
            return null;
        }
        throw error;
    }
    const { version } = JSON.parse(raw);
    return typeof version === 'string' ? version : null;
}
542
/**
 * Normalize a path to forward slashes so artifacts are stable across
 * Windows and POSIX hosts.
 *
 * @param {string} value a relative or absolute path
 * @returns {string} the path with every backslash replaced by '/'.
 */
function toPortablePath(value) {
    return value.split('\\').join('/');
}