struth 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,184 @@
1
+ import { createHash } from "node:crypto";
2
+ import type { z } from "zod";
3
+ import { DEFAULT_CHECK_INTERVAL_HOURS, SCHEMA_VERSION } from "../constants.js";
4
+ import {
5
+ type CondensedPage,
6
+ type Coverage,
7
+ DocSetManifest,
8
+ type MirrorOptions,
9
+ type PageRef,
10
+ type Section,
11
+ } from "../schemas.js";
12
+
13
/**
 * Options accepted by organize(): the user-facing "name"/"output" settings
 * from MirrorOptions, plus the total number of URLs discovered upstream
 * (used for coverage calculation; defaults to the processed page count).
 */
type OrganizeOpts = Pick<z.infer<typeof MirrorOptions>, "name" | "output"> & {
  totalDiscovered?: number;
};
16
+
17
+ /**
18
+ * Extract section slug from URL path.
19
+ * Uses first path segment after domain, or "root" if none.
20
+ */
21
+ function extractSectionSlug(url: string): string {
22
+ try {
23
+ const parsed = new URL(url);
24
+ const segments = parsed.pathname.split("/").filter(Boolean);
25
+ if (segments.length === 0) return "root";
26
+ return segments[0].toLowerCase().replace(/[^a-z0-9-]/g, "-");
27
+ } catch {
28
+ return "root";
29
+ }
30
+ }
31
+
32
+ /**
33
+ * Make a section name human-readable from its slug.
34
+ */
35
+ function sectionName(slug: string): string {
36
+ if (slug === "root") return "Root";
37
+ return slug
38
+ .split("-")
39
+ .map((word) => word.charAt(0).toUpperCase() + word.slice(1))
40
+ .join(" ");
41
+ }
42
+
43
+ /**
44
+ * Extract topic tags from H1/H2 headings in content.
45
+ */
46
+ function extractTopicTags(content: string): string[] {
47
+ const headingRegex = /^#{1,2}\s+(.+)$/gm;
48
+ const tags = new Set<string>();
49
+
50
+ for (const match of content.matchAll(headingRegex)) {
51
+ const heading = match[1].trim().toLowerCase();
52
+ // Extract individual words as tags, filter short/common ones
53
+ const words = heading.split(/[\s/\\-]+/).filter((w) => w.length > 2);
54
+ for (const word of words) {
55
+ tags.add(word.replace(/[^a-z0-9]/g, ""));
56
+ }
57
+ }
58
+
59
+ // Filter empty strings
60
+ return [...tags].filter(Boolean);
61
+ }
62
+
63
/**
 * Organize condensed pages into sections and build the doc set manifest.
 * Groups pages by URL path prefix, generates topic tags, calculates coverage.
 *
 * Note: the incoming page.section field is NOT used for grouping — the
 * section slug is re-derived here from each page's URL.
 *
 * @param pages Condensed pages produced by the condense stage.
 * @param sourceUrl Root URL the documentation was mirrored from.
 * @param opts Optional name override, output path, and discovery total.
 * @returns The manifest, validated via DocSetManifest.parse (which throws
 *   a zod error if the assembled object violates the schema).
 */
export async function organize(
  pages: z.infer<typeof CondensedPage>[],
  sourceUrl: string,
  opts: OrganizeOpts,
): Promise<z.infer<typeof DocSetManifest>> {
  const now = new Date().toISOString();
  // If the caller doesn't report how many URLs were discovered, assume
  // every processed page was discovered (coverage_ratio becomes 1).
  const totalDiscovered = opts.totalDiscovered ?? pages.length;

  // Group pages by section
  const sectionMap = new Map<
    string,
    {
      pages: z.infer<typeof PageRef>[];
      allContent: string;
      wordCountClean: number;
      wordCountCondensed: number;
    }
  >();

  for (const page of pages) {
    const slug = extractSectionSlug(page.url);

    if (!sectionMap.has(slug)) {
      sectionMap.set(slug, {
        pages: [],
        allContent: "",
        wordCountClean: 0,
        wordCountCondensed: 0,
      });
    }

    const section = sectionMap.get(slug);
    if (!section) continue; // unreachable (set just above); satisfies the type checker

    const pageRef: z.infer<typeof PageRef> = {
      url: page.url,
      slug: page.slug,
      section: slug,
      word_count_clean: page.word_count_clean,
      word_count_condensed: page.word_count_condensed,
      condensed: page.condensed,
    };

    section.pages.push(pageRef);
    // Accumulate per-section condensed content for topic-tag extraction below.
    section.allContent += `\n${page.content_condensed}`;
    section.wordCountClean += page.word_count_clean;
    section.wordCountCondensed += page.word_count_condensed;
  }

  // Build sections
  const sections: z.infer<typeof Section>[] = [];
  for (const [slug, data] of sectionMap) {
    sections.push({
      name: sectionName(slug),
      slug,
      pages: data.pages,
      word_count_clean: data.wordCountClean,
      word_count_condensed: data.wordCountCondensed,
      page_count: data.pages.length,
      topic_tags: extractTopicTags(data.allContent),
    });
  }

  // Build flat page list
  const allPages = sections.flatMap((s) => s.pages);

  // Calculate coverage
  const coverage: z.infer<typeof Coverage> = {
    total_discovered: totalDiscovered,
    successfully_processed: pages.length,
    skipped: totalDiscovered - pages.length,
    skip_reasons: {}, // skip reasons are not tracked at this stage
    coverage_ratio: totalDiscovered > 0 ? pages.length / totalDiscovered : 0,
  };

  // Calculate content hash: sha256 hex over all condensed content joined
  // with newlines — recorded in provenance for change detection.
  const allContent = pages.map((p) => p.content_condensed).join("\n");
  const contentHash = createHash("sha256").update(allContent).digest("hex");

  // Derive name from URL if not provided,
  // e.g. "https://docs.example.com" -> "example-com".
  let name = opts.name;
  if (!name) {
    try {
      const parsed = new URL(sourceUrl);
      name = parsed.hostname.replace(/^(www|docs)\./, "").replace(/\./g, "-");
    } catch {
      name = "unknown";
    }
  }

  const manifest: z.infer<typeof DocSetManifest> = {
    schema_version: SCHEMA_VERSION,
    name,
    version: null, // version detection not implemented here; always null
    source_url: sourceUrl,
    generated_at: now,
    sections,
    pages: allPages,
    coverage,
    provenance: {
      source_url: sourceUrl,
      fetched_at: now,
      content_hash: contentHash,
      license: null,
      // NOTE(review): hard-coded "allowed" — presumably the discover stage
      // already enforced robots.txt; confirm upstream.
      robots_txt_status: "allowed",
    },
    trust: {
      freshness: "fresh",
      last_checked: now,
      last_changed: now,
      check_interval_hours: DEFAULT_CHECK_INTERVAL_HOURS,
      coverage,
    },
    platform_detected: null,
  };

  // Validate before returning so a malformed manifest fails fast here
  // rather than when it is read back from disk.
  return DocSetManifest.parse(manifest);
}
@@ -0,0 +1,204 @@
1
import { z } from "zod";

// ── Schema version ──────────────────────────────
// Stamped into every persisted manifest; readers warn when a stored
// manifest's schema_version differs from this value.
export const SCHEMA_VERSION = "1.0.0";

// ── Primitives ──────────────────────────────────
// Reusable string refinements shared by the schemas below.

export const UrlString = z.string().url();
export const IsoTimestamp = z.string().datetime();
export const Slug = z.string().regex(/^[a-z0-9-]+$/);

// ── Content Integrity ───────────────────────────

// Results of content-sanitization checks attached to each cleaned page.
export const ContentIntegrity = z.object({
  unicode_normalized: z.boolean(),
  structural_baseline: z.number().min(0).max(1),
  flagged_anomalies: z.array(z.string()),
  // Named for OWASP LLM Top 10 item LLM01 (prompt injection) — the exact
  // check lives in the clean stage; not visible here.
  owasp_llm01_checked: z.boolean(),
  pipeline_version: z.string(),
});

// Structural statistics computed from cleaned page content.
export const StructuralMetrics = z.object({
  char_entropy: z.number(),
  code_block_ratio: z.number().min(0).max(1),
  avg_section_words: z.number(),
  imperative_sentence_ratio: z.number().min(0).max(1),
  total_tokens: z.number().int(),
});

// ── Pipeline Stage Outputs ──────────────────────

// Stage 1: Discover
export const DiscoveredUrl = z.object({
  url: UrlString,
  // Which discovery mechanism yielded this URL.
  source: z.enum(["llms_txt", "llms_full_txt", "md_suffix", "sitemap", "firecrawl", "link_walk"]),
});

export const DiscoverResult = z.object({
  urls: z.array(DiscoveredUrl),
  source_method: z.string(),
  total_found: z.number().int(),
  after_dedup: z.number().int(),
  platform_detected: z.string().nullable(),
  robots_txt_status: z.enum(["allowed", "blocked", "no_robots_txt"]),
});

// Stage 2: Clean
export const CleanedPage = z.object({
  url: UrlString,
  slug: Slug,
  section: z.string(),
  content_clean: z.string(),
  word_count: z.number().int(),
  fetch_strategy: z.enum(["readability", "jina", "md_suffix", "llms_full_txt"]),
  content_integrity: ContentIntegrity,
  structural_metrics: StructuralMetrics,
});

// Stage 3: Condense
export const CondensedPage = z.object({
  url: UrlString,
  slug: Slug,
  section: z.string(),
  content_condensed: z.string(),
  content_clean: z.string(),
  word_count_clean: z.number().int(),
  word_count_condensed: z.number().int(),
  condensed: z.boolean(),
  condense_method: z.enum(["claude_cli", "api", "skipped"]),
});

// Stage 4: Organize
// Lightweight per-page reference embedded in Section and DocSetManifest.
export const PageRef = z.object({
  url: UrlString,
  slug: Slug,
  section: z.string(),
  word_count_clean: z.number().int(),
  word_count_condensed: z.number().int(),
  condensed: z.boolean(),
});

export const Section = z.object({
  name: z.string(),
  slug: Slug,
  pages: z.array(PageRef),
  word_count_clean: z.number().int(),
  word_count_condensed: z.number().int(),
  page_count: z.number().int(),
  topic_tags: z.array(z.string()),
});

// ── Coverage & Provenance ───────────────────────

export const Coverage = z.object({
  total_discovered: z.number().int(),
  successfully_processed: z.number().int(),
  skipped: z.number().int(),
  // Map of skip reason → count.
  skip_reasons: z.record(z.string(), z.number().int()),
  coverage_ratio: z.number().min(0).max(1),
});

export const Provenance = z.object({
  source_url: UrlString,
  fetched_at: IsoTimestamp,
  content_hash: z.string(), // sha256 hex over condensed content (see organize)
  license: z.string().nullable(),
  robots_txt_status: z.enum(["allowed", "blocked", "no_robots_txt"]),
});

export const TrustMetadata = z.object({
  freshness: z.enum(["fresh", "stale", "unknown"]),
  last_checked: IsoTimestamp.nullable(),
  last_changed: IsoTimestamp.nullable(),
  check_interval_hours: z.number(),
  coverage: Coverage,
});

// ── Storage (persisted to disk) ─────────────────

// Top-level manifest.json for one indexed doc set.
export const DocSetManifest = z.object({
  schema_version: z.string(),
  name: z.string(),
  version: z.string().nullable(),
  source_url: UrlString,
  generated_at: IsoTimestamp,
  sections: z.array(Section),
  pages: z.array(PageRef),
  coverage: Coverage,
  provenance: Provenance,
  trust: TrustMetadata,
  platform_detected: z.string().nullable(),
});

// ── MCP Schemas ─────────────────────────────────

export const GetDocsRequest = z.object({
  query: z.string().min(1),
  library: z.string().optional(),
  version: z.string().optional(),
  project_path: z.string().optional(),
  max_sections: z.number().int().min(1).max(20).default(5),
});

export const ServedSection = z.object({
  title: z.string(),
  content: z.string(),
  provenance: Provenance,
  content_integrity: ContentIntegrity,
});

export const GetDocsResponse = z.object({
  sections: z.array(ServedSection),
  trust: TrustMetadata,
  library: z.string(),
  version: z.string().nullable(),
  query: z.string(),
});

export const ListLibrariesRequest = z.object({
  filter: z.string().optional(),
});

export const LibrarySummary = z.object({
  name: z.string(),
  version: z.string().nullable(),
  sections: z.number().int(),
  pages: z.number().int(),
  freshness: z.enum(["fresh", "stale", "unknown"]),
  last_updated: IsoTimestamp,
  word_count: z.number().int(),
});

export const ListLibrariesResponse = z.object({
  libraries: z.array(LibrarySummary),
});

// ── Telemetry ───────────────────────────────────

export const TelemetryEvent = z.object({
  event: z.enum(["query", "index", "error"]),
  library: z.string(),
  sections_hit: z.array(z.string()),
  latency_ms: z.number(),
  error_type: z.string().optional(),
  client_version: z.string(),
  timestamp: IsoTimestamp,
});

// ── Pipeline Options ────────────────────────────

// CLI / API options for the mirror pipeline.
export const MirrorOptions = z.object({
  url: UrlString,
  name: z.string().optional(),
  condense: z.boolean().default(false),
  condenseApi: z.boolean().default(false),
  concurrency: z.number().int().min(1).max(20).default(3),
  excludePath: z.array(z.string()).default([]),
  // NOTE(review): numeric excludes — presumably indices into a discovery
  // listing; confirm against the CLI code that consumes this.
  exclude: z.array(z.number().int()).default([]),
  smart: z.string().optional(),
  filter: z.string().optional(),
  top: z.number().int().default(20),
  fetchStrategy: z.enum(["auto", "readability", "jina"]).default("auto"),
  output: z.string().optional(),
});
@@ -0,0 +1,22 @@
1
+ /**
2
+ * Bun.spawn wrappers — extracted for testability.
3
+ * Tests can vi.mock this module without touching the readonly Bun global.
4
+ */
5
+
6
+ /** Spawn a CLI subprocess with stdin input. Used by the condense pipeline. */
7
+ export function spawnCli(
8
+ cmd: string[],
9
+ opts: { stdin: Blob },
10
+ ): { exited: Promise<number>; stdout: ReadableStream<Uint8Array> } {
11
+ const proc = Bun.spawn(cmd, opts);
12
+ return { exited: proc.exited, stdout: proc.stdout as ReadableStream<Uint8Array> };
13
+ }
14
+
15
+ /** Spawn a detached background process. Used by the daemon. */
16
+ export function spawnDetached(
17
+ cmd: string[],
18
+ opts: { stdio: ["ignore", "ignore", "inherit"]; detached: true },
19
+ ): { pid: number; unref: () => void } {
20
+ const child = Bun.spawn(cmd, opts);
21
+ return { pid: child.pid, unref: () => child.unref() };
22
+ }
@@ -0,0 +1,108 @@
1
+ import { randomUUID } from "node:crypto";
2
+ import { mkdir, readFile, readdir, rename, rm, writeFile } from "node:fs/promises";
3
+ import { join } from "node:path";
4
+ import type { z } from "zod";
5
+ import { SCHEMA_VERSION } from "../constants.js";
6
+ import { DocSetManifest } from "../schemas.js";
7
+ import { docSetDir, librariesDir, manifestPath } from "./paths.js";
8
+
9
+ /**
10
+ * Read a doc set manifest and its sections from disk.
11
+ * Validates against current schema version.
12
+ */
13
+ export async function readDocSet(
14
+ name: string,
15
+ version?: string,
16
+ ): Promise<z.infer<typeof DocSetManifest>> {
17
+ const mPath = manifestPath(name, version ?? null);
18
+ const raw = await readFile(mPath, "utf-8");
19
+ const data = JSON.parse(raw);
20
+ const manifest = DocSetManifest.parse(data);
21
+
22
+ if (manifest.schema_version !== SCHEMA_VERSION) {
23
+ console.warn(
24
+ `[struth] Warning: doc set "${name}" has schema version ${manifest.schema_version}, current is ${SCHEMA_VERSION}`,
25
+ );
26
+ }
27
+
28
+ return manifest;
29
+ }
30
+
31
+ /**
32
+ * Write a doc set manifest and its sections to disk.
33
+ * Uses atomic write: temp dir → rename on success.
34
+ */
35
+ export async function writeDocSet(
36
+ manifest: z.infer<typeof DocSetManifest>,
37
+ pages: { slug: string; clean: string; condensed: string }[],
38
+ ): Promise<void> {
39
+ const targetDir = docSetDir(manifest.name, manifest.version);
40
+ const tmpSuffix = randomUUID().slice(0, 8);
41
+ const tempDir = `${targetDir}.tmp.${tmpSuffix}`;
42
+
43
+ try {
44
+ const tempSectionsDir = join(tempDir, "sections");
45
+ await mkdir(tempSectionsDir, { recursive: true });
46
+
47
+ // Write manifest
48
+ await writeFile(join(tempDir, "manifest.json"), JSON.stringify(manifest, null, 2), "utf-8");
49
+
50
+ // Write section files
51
+ for (const page of pages) {
52
+ await writeFile(join(tempSectionsDir, `${page.slug}.clean.md`), page.clean, "utf-8");
53
+ await writeFile(join(tempSectionsDir, `${page.slug}.condensed.md`), page.condensed, "utf-8");
54
+ }
55
+
56
+ // Atomic swap: remove old target, rename temp to target
57
+ try {
58
+ await rm(targetDir, { recursive: true, force: true });
59
+ } catch {
60
+ // Target didn't exist — fine
61
+ }
62
+ await rename(tempDir, targetDir);
63
+ } catch (err) {
64
+ // Cleanup temp dir on failure
65
+ try {
66
+ await rm(tempDir, { recursive: true, force: true });
67
+ } catch {
68
+ // Best effort cleanup
69
+ }
70
+ throw err;
71
+ }
72
+ }
73
+
74
+ /**
75
+ * List all indexed doc sets.
76
+ */
77
+ export async function listDocSets(): Promise<
78
+ { name: string; version: string | null; path: string }[]
79
+ > {
80
+ const libDir = librariesDir();
81
+
82
+ let entries: string[];
83
+ try {
84
+ entries = await readdir(libDir);
85
+ } catch {
86
+ return [];
87
+ }
88
+
89
+ const results: { name: string; version: string | null; path: string }[] = [];
90
+
91
+ for (const entry of entries) {
92
+ const entryPath = join(libDir, entry);
93
+ const mPath = join(entryPath, "manifest.json");
94
+ try {
95
+ const raw = await readFile(mPath, "utf-8");
96
+ const data = JSON.parse(raw);
97
+ results.push({
98
+ name: data.name ?? entry,
99
+ version: data.version ?? null,
100
+ path: entryPath,
101
+ });
102
+ } catch {
103
+ // Skip entries without valid manifest
104
+ }
105
+ }
106
+
107
+ return results;
108
+ }
@@ -0,0 +1,40 @@
1
+ import { homedir } from "node:os";
2
+ import { join } from "node:path";
3
+ import { LIBRARIES_DIR } from "../constants.js";
4
+
5
+ /**
6
+ * Resolve the struth home directory.
7
+ * Expands ~ to the user's home directory.
8
+ */
9
+ export function struthHome(): string {
10
+ return join(homedir(), ".struth");
11
+ }
12
+
13
+ /**
14
+ * Resolve the libraries directory.
15
+ */
16
+ export function librariesDir(): string {
17
+ return join(struthHome(), LIBRARIES_DIR);
18
+ }
19
+
20
+ /**
21
+ * Resolve the path for a specific doc set.
22
+ */
23
+ export function docSetDir(name: string, version?: string | null): string {
24
+ const slug = version ? `${name}-${version}` : name;
25
+ return join(librariesDir(), slug);
26
+ }
27
+
28
+ /**
29
+ * Resolve the manifest path for a doc set.
30
+ */
31
+ export function manifestPath(name: string, version?: string | null): string {
32
+ return join(docSetDir(name, version), "manifest.json");
33
+ }
34
+
35
+ /**
36
+ * Resolve the sections directory for a doc set.
37
+ */
38
+ export function sectionsDir(name: string, version?: string | null): string {
39
+ return join(docSetDir(name, version), "sections");
40
+ }
@@ -0,0 +1,36 @@
1
+ import type { z } from "zod";
2
+ import type * as S from "./schemas.js";
3
+
4
// Plain TypeScript aliases inferred from the zod schemas in schemas.ts,
// so consumers can annotate values without importing zod or writing
// z.infer<...> inline.

// Pipeline stage types
export type DiscoveredUrl = z.infer<typeof S.DiscoveredUrl>;
export type DiscoverResult = z.infer<typeof S.DiscoverResult>;
export type CleanedPage = z.infer<typeof S.CleanedPage>;
export type CondensedPage = z.infer<typeof S.CondensedPage>;
export type PageRef = z.infer<typeof S.PageRef>;
export type Section = z.infer<typeof S.Section>;

// Integrity types
export type ContentIntegrity = z.infer<typeof S.ContentIntegrity>;
export type StructuralMetrics = z.infer<typeof S.StructuralMetrics>;

// Coverage & provenance types
export type Coverage = z.infer<typeof S.Coverage>;
export type Provenance = z.infer<typeof S.Provenance>;
export type TrustMetadata = z.infer<typeof S.TrustMetadata>;

// Storage types
export type DocSetManifest = z.infer<typeof S.DocSetManifest>;

// MCP types
export type GetDocsRequest = z.infer<typeof S.GetDocsRequest>;
export type GetDocsResponse = z.infer<typeof S.GetDocsResponse>;
export type ServedSection = z.infer<typeof S.ServedSection>;
export type ListLibrariesRequest = z.infer<typeof S.ListLibrariesRequest>;
export type ListLibrariesResponse = z.infer<typeof S.ListLibrariesResponse>;
export type LibrarySummary = z.infer<typeof S.LibrarySummary>;

// Telemetry types
export type TelemetryEvent = z.infer<typeof S.TelemetryEvent>;

// Options types
export type MirrorOptions = z.infer<typeof S.MirrorOptions>;
@@ -0,0 +1,95 @@
1
+ import { readFile, rm, writeFile } from "node:fs/promises";
2
+ import { join } from "node:path";
3
+ import { spawnDetached } from "../core/spawn.js";
4
+ import { struthHome } from "../core/storage/paths.js";
5
+
6
+ function pidFilePath(): string {
7
+ return join(struthHome(), "daemon.pid");
8
+ }
9
+
10
+ /**
11
+ * Check whether the daemon process is currently running.
12
+ */
13
+ export async function isDaemonRunning(): Promise<boolean> {
14
+ try {
15
+ const raw = await readFile(pidFilePath(), "utf-8");
16
+ const pid = Number.parseInt(raw.trim(), 10);
17
+ process.kill(pid, 0);
18
+ return true;
19
+ } catch {
20
+ // No PID file or process not running — clean up stale file
21
+ try {
22
+ await rm(pidFilePath());
23
+ } catch {
24
+ // No file to remove
25
+ }
26
+ return false;
27
+ }
28
+ }
29
+
30
+ /**
31
+ * Start the daemon as a detached background process.
32
+ */
33
+ export async function startDaemon(intervalHours = 24): Promise<void> {
34
+ if (await isDaemonRunning()) {
35
+ process.stderr.write("Daemon already running\n");
36
+ return;
37
+ }
38
+
39
+ const cliPath = join(import.meta.dir ?? ".", "../cli/index.ts");
40
+ const child = spawnDetached(
41
+ ["bun", "run", cliPath, "daemon", "run", "--interval", String(intervalHours)],
42
+ { stdio: ["ignore", "ignore", "inherit"], detached: true },
43
+ );
44
+
45
+ child.unref();
46
+
47
+ await writeFile(pidFilePath(), String(child.pid), "utf-8");
48
+ process.stderr.write(`Daemon started (PID: ${child.pid}, interval: ${intervalHours}h)\n`);
49
+ }
50
+
51
+ /**
52
+ * Stop the running daemon process.
53
+ */
54
+ export async function stopDaemon(): Promise<void> {
55
+ let pid: number;
56
+ try {
57
+ const raw = await readFile(pidFilePath(), "utf-8");
58
+ pid = Number.parseInt(raw.trim(), 10);
59
+ } catch {
60
+ process.stderr.write("No daemon running\n");
61
+ return;
62
+ }
63
+
64
+ try {
65
+ process.kill(pid, 0);
66
+ // Process is alive — send SIGTERM
67
+ process.kill(pid, "SIGTERM");
68
+ process.stderr.write(`Daemon stopped (PID: ${pid})\n`);
69
+ } catch {
70
+ // Process not alive — stale PID
71
+ process.stderr.write("Cleaned up stale PID file\n");
72
+ }
73
+
74
+ await rm(pidFilePath());
75
+ }
76
+
77
+ /**
78
+ * Run the daemon loop (called by the spawned process).
79
+ */
80
+ export async function runDaemonLoop(intervalHours: number): Promise<never> {
81
+ const { cleanupOrphans, refreshAll } = await import("./refresh.js");
82
+
83
+ await cleanupOrphans();
84
+ process.stderr.write(`Daemon starting, interval: ${intervalHours}h\n`);
85
+
86
+ process.on("SIGTERM", () => {
87
+ process.stderr.write("Shutting down...\n");
88
+ process.exit(0);
89
+ });
90
+
91
+ while (true) {
92
+ await refreshAll();
93
+ await Bun.sleep(intervalHours * 3600 * 1000);
94
+ }
95
+ }