@voidwire/lore 0.9.1 → 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/cli.ts +66 -1
- package/lib/config.ts +134 -0
- package/lib/indexer.ts +213 -0
- package/lib/indexers/blogs.ts +146 -0
- package/lib/indexers/captures.ts +105 -0
- package/lib/indexers/commits.ts +90 -0
- package/lib/indexers/development.ts +68 -0
- package/lib/indexers/events.ts +61 -0
- package/lib/indexers/explorations.ts +89 -0
- package/lib/indexers/flux.ts +142 -0
- package/lib/indexers/index.ts +41 -0
- package/lib/indexers/insights.ts +53 -0
- package/lib/indexers/learnings.ts +53 -0
- package/lib/indexers/observations.ts +53 -0
- package/lib/indexers/obsidian.ts +151 -0
- package/lib/indexers/personal.ts +262 -0
- package/lib/indexers/readmes.ts +49 -0
- package/lib/indexers/sessions.ts +127 -0
- package/lib/indexers/teachings.ts +52 -0
- package/lib/info.ts +3 -3
- package/lib/list.ts +23 -32
- package/lib/projects.ts +26 -31
- package/lib/realtime.ts +15 -15
- package/lib/search.ts +5 -7
- package/lib/semantic.ts +6 -31
- package/package.json +3 -2
|
@@ -0,0 +1,90 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* lib/indexers/commits.ts - Git commits indexer
|
|
3
|
+
*
|
|
4
|
+
* Scans all project directories for git repos and indexes commit history.
|
|
5
|
+
* Uses record/unit separators to avoid delimiter collisions in messages.
|
|
6
|
+
*
|
|
7
|
+
* Source: commits
|
|
8
|
+
* Topic: project directory name (repo name)
|
|
9
|
+
* Type: (empty)
|
|
10
|
+
* Timestamp: commit author date as ISO 8601
|
|
11
|
+
*/
|
|
12
|
+
|
|
13
|
+
import { readdirSync, existsSync } from "fs";
|
|
14
|
+
import { join } from "path";
|
|
15
|
+
import { spawnSync } from "child_process";
|
|
16
|
+
import type { IndexerContext } from "../indexer";
|
|
17
|
+
|
|
18
|
+
export async function indexCommits(ctx: IndexerContext): Promise<void> {
|
|
19
|
+
const projectsDir = ctx.config.paths.projects;
|
|
20
|
+
|
|
21
|
+
if (!existsSync(projectsDir)) {
|
|
22
|
+
console.log(`Projects directory not found: ${projectsDir}`);
|
|
23
|
+
return;
|
|
24
|
+
}
|
|
25
|
+
|
|
26
|
+
const projects = readdirSync(projectsDir, { withFileTypes: true })
|
|
27
|
+
.filter((dirent) => dirent.isDirectory())
|
|
28
|
+
.map((dirent) => dirent.name);
|
|
29
|
+
|
|
30
|
+
for (const project of projects) {
|
|
31
|
+
const repoPath = join(projectsDir, project);
|
|
32
|
+
const gitDir = join(repoPath, ".git");
|
|
33
|
+
|
|
34
|
+
if (!existsSync(gitDir)) continue;
|
|
35
|
+
|
|
36
|
+
try {
|
|
37
|
+
// Use record separator (%x1e) and unit separator (%x1f) to avoid
|
|
38
|
+
// delimiter collisions with commit message content
|
|
39
|
+
const SEP = "\x1e"; // Record separator between commits
|
|
40
|
+
const UNIT = "\x1f"; // Unit separator between fields
|
|
41
|
+
const result = spawnSync(
|
|
42
|
+
"git",
|
|
43
|
+
[
|
|
44
|
+
"log",
|
|
45
|
+
"--all",
|
|
46
|
+
`--format=${SEP}%H${UNIT}%an${UNIT}%aI${UNIT}%s${UNIT}%b`,
|
|
47
|
+
],
|
|
48
|
+
{
|
|
49
|
+
cwd: repoPath,
|
|
50
|
+
encoding: "utf-8",
|
|
51
|
+
maxBuffer: 10 * 1024 * 1024, // 10MB buffer
|
|
52
|
+
},
|
|
53
|
+
);
|
|
54
|
+
|
|
55
|
+
if (result.error || result.status !== 0) {
|
|
56
|
+
console.warn(`Failed to read git log for ${project}: ${result.stderr}`);
|
|
57
|
+
continue;
|
|
58
|
+
}
|
|
59
|
+
|
|
60
|
+
const records = result.stdout.split(SEP).filter(Boolean);
|
|
61
|
+
|
|
62
|
+
for (const record of records) {
|
|
63
|
+
const parts = record.split(UNIT);
|
|
64
|
+
if (parts.length < 4) continue;
|
|
65
|
+
|
|
66
|
+
const [sha, author, timestamp, subject, ...bodyParts] = parts;
|
|
67
|
+
const body = bodyParts.join("").trim();
|
|
68
|
+
const baseContent = body || subject;
|
|
69
|
+
const content = author
|
|
70
|
+
? `Author: ${author}\n${baseContent}`
|
|
71
|
+
: baseContent;
|
|
72
|
+
|
|
73
|
+
ctx.insert({
|
|
74
|
+
source: "commits",
|
|
75
|
+
title: `[commit] ${subject}`,
|
|
76
|
+
content,
|
|
77
|
+
topic: project,
|
|
78
|
+
timestamp,
|
|
79
|
+
metadata: {
|
|
80
|
+
sha: sha.trim(),
|
|
81
|
+
author,
|
|
82
|
+
},
|
|
83
|
+
});
|
|
84
|
+
}
|
|
85
|
+
} catch (e) {
|
|
86
|
+
console.warn(`Failed to index commits for ${project}: ${e}`);
|
|
87
|
+
continue;
|
|
88
|
+
}
|
|
89
|
+
}
|
|
90
|
+
}
|
|
@@ -0,0 +1,68 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* lib/indexers/development.ts - Development projects indexer
|
|
3
|
+
*
|
|
4
|
+
* Scans project directories for .workflow/artifacts/PROJECT_SUMMARY.md
|
|
5
|
+
* and indexes project summaries.
|
|
6
|
+
*
|
|
7
|
+
* Source: development, Topic: project directory name,
|
|
8
|
+
* Type: empty, Timestamp: file mtime as ISO 8601
|
|
9
|
+
*/
|
|
10
|
+
|
|
11
|
+
import { readdirSync, readFileSync, statSync, existsSync } from "fs";
|
|
12
|
+
import { join } from "path";
|
|
13
|
+
import type { IndexerContext } from "../indexer";
|
|
14
|
+
|
|
15
|
+
export async function indexDevelopment(ctx: IndexerContext): Promise<void> {
|
|
16
|
+
const projectsDir = ctx.config.paths.projects;
|
|
17
|
+
|
|
18
|
+
if (!existsSync(projectsDir)) {
|
|
19
|
+
console.log(`Projects directory not found: ${projectsDir}`);
|
|
20
|
+
return;
|
|
21
|
+
}
|
|
22
|
+
|
|
23
|
+
const projects = readdirSync(projectsDir, { withFileTypes: true })
|
|
24
|
+
.filter((dirent) => dirent.isDirectory())
|
|
25
|
+
.map((dirent) => dirent.name);
|
|
26
|
+
|
|
27
|
+
for (const project of projects) {
|
|
28
|
+
const summaryPath = join(
|
|
29
|
+
projectsDir,
|
|
30
|
+
project,
|
|
31
|
+
".workflow",
|
|
32
|
+
"artifacts",
|
|
33
|
+
"PROJECT_SUMMARY.md",
|
|
34
|
+
);
|
|
35
|
+
|
|
36
|
+
if (!existsSync(summaryPath)) continue;
|
|
37
|
+
|
|
38
|
+
try {
|
|
39
|
+
const raw = readFileSync(summaryPath, "utf-8");
|
|
40
|
+
const mtime = statSync(summaryPath).mtime;
|
|
41
|
+
const timestamp = mtime.toISOString();
|
|
42
|
+
|
|
43
|
+
// Extract tech from **Stack:** line (matches bash script behavior)
|
|
44
|
+
let content = raw;
|
|
45
|
+
let tech: string | undefined;
|
|
46
|
+
|
|
47
|
+
const techMatch = raw.match(/^\*\*Stack:\*\*\s*(.+)$/m);
|
|
48
|
+
if (techMatch) {
|
|
49
|
+
tech = techMatch[1].trim();
|
|
50
|
+
}
|
|
51
|
+
|
|
52
|
+
const metadata: Record<string, unknown> = {};
|
|
53
|
+
if (tech) metadata.tech = tech;
|
|
54
|
+
|
|
55
|
+
ctx.insert({
|
|
56
|
+
source: "development",
|
|
57
|
+
title: `Project: ${project}`,
|
|
58
|
+
content,
|
|
59
|
+
topic: project,
|
|
60
|
+
timestamp,
|
|
61
|
+
metadata: Object.keys(metadata).length > 0 ? metadata : undefined,
|
|
62
|
+
});
|
|
63
|
+
} catch (e) {
|
|
64
|
+
console.warn(`Failed to read ${summaryPath}: ${e}`);
|
|
65
|
+
continue;
|
|
66
|
+
}
|
|
67
|
+
}
|
|
68
|
+
}
|
|
@@ -0,0 +1,61 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* lib/indexers/events.ts - Events indexer
|
|
3
|
+
*
|
|
4
|
+
* Aggregates development events from log.jsonl by project.
|
|
5
|
+
* Each project gets one entry with all event lines.
|
|
6
|
+
*
|
|
7
|
+
* Source: events
|
|
8
|
+
* Topic: project name
|
|
9
|
+
* Type: (empty)
|
|
10
|
+
* Timestamp: last event timestamp per project
|
|
11
|
+
*/
|
|
12
|
+
|
|
13
|
+
import { readFileSync, existsSync } from "fs";
|
|
14
|
+
import type { IndexerContext } from "../indexer";
|
|
15
|
+
|
|
16
|
+
export async function indexEvents(ctx: IndexerContext): Promise<void> {
|
|
17
|
+
const logPath = `${ctx.config.paths.data}/log.jsonl`;
|
|
18
|
+
|
|
19
|
+
if (!existsSync(logPath)) {
|
|
20
|
+
console.log("No log.jsonl found, skipping events");
|
|
21
|
+
return;
|
|
22
|
+
}
|
|
23
|
+
|
|
24
|
+
const lines = readFileSync(logPath, "utf-8").split("\n").filter(Boolean);
|
|
25
|
+
const projectData = new Map<
|
|
26
|
+
string,
|
|
27
|
+
{ lines: string[]; lastTimestamp: string }
|
|
28
|
+
>();
|
|
29
|
+
|
|
30
|
+
for (const line of lines) {
|
|
31
|
+
try {
|
|
32
|
+
const event = JSON.parse(line);
|
|
33
|
+
const project = event.data?.topic || "general";
|
|
34
|
+
if (!projectData.has(project)) {
|
|
35
|
+
projectData.set(project, { lines: [], lastTimestamp: "" });
|
|
36
|
+
}
|
|
37
|
+
const data = projectData.get(project)!;
|
|
38
|
+
data.lines.push(
|
|
39
|
+
`[${event.timestamp}] ${event.event}: ${event.type || ""}`,
|
|
40
|
+
);
|
|
41
|
+
if (event.timestamp) {
|
|
42
|
+
data.lastTimestamp = event.timestamp;
|
|
43
|
+
}
|
|
44
|
+
} catch {
|
|
45
|
+
// Skip malformed JSON
|
|
46
|
+
continue;
|
|
47
|
+
}
|
|
48
|
+
}
|
|
49
|
+
|
|
50
|
+
for (const [project, data] of projectData) {
|
|
51
|
+
const content = data.lines.join("\n");
|
|
52
|
+
|
|
53
|
+
ctx.insert({
|
|
54
|
+
source: "events",
|
|
55
|
+
title: `Development events: ${project}`,
|
|
56
|
+
content,
|
|
57
|
+
topic: project,
|
|
58
|
+
timestamp: data.lastTimestamp,
|
|
59
|
+
});
|
|
60
|
+
}
|
|
61
|
+
}
|
|
@@ -0,0 +1,89 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* lib/indexers/explorations.ts - Explorations indexer
|
|
3
|
+
*
|
|
4
|
+
* Recursively scans explorations directory for markdown files.
|
|
5
|
+
* Extracts project and status from frontmatter when available.
|
|
6
|
+
*
|
|
7
|
+
* Source: explorations
|
|
8
|
+
* Topic: frontmatter project or parent directory name
|
|
9
|
+
* Type: (empty)
|
|
10
|
+
* Timestamp: file mtime as ISO 8601
|
|
11
|
+
*/
|
|
12
|
+
|
|
13
|
+
import { readdirSync, readFileSync, statSync, existsSync } from "fs";
|
|
14
|
+
import { join, basename, dirname } from "path";
|
|
15
|
+
import type { IndexerContext } from "../indexer";
|
|
16
|
+
|
|
17
|
+
function walkMarkdownFiles(dir: string, files: string[] = []): string[] {
|
|
18
|
+
const entries = readdirSync(dir, { withFileTypes: true });
|
|
19
|
+
|
|
20
|
+
for (const entry of entries) {
|
|
21
|
+
const fullPath = join(dir, entry.name);
|
|
22
|
+
|
|
23
|
+
if (entry.isDirectory()) {
|
|
24
|
+
walkMarkdownFiles(fullPath, files);
|
|
25
|
+
} else if (entry.isFile() && entry.name.endsWith(".md")) {
|
|
26
|
+
files.push(fullPath);
|
|
27
|
+
}
|
|
28
|
+
}
|
|
29
|
+
|
|
30
|
+
return files;
|
|
31
|
+
}
|
|
32
|
+
|
|
33
|
+
export async function indexExplorations(ctx: IndexerContext): Promise<void> {
|
|
34
|
+
const explorationsDir = ctx.config.paths.explorations;
|
|
35
|
+
|
|
36
|
+
if (!existsSync(explorationsDir)) {
|
|
37
|
+
console.log(`Explorations directory not found: ${explorationsDir}`);
|
|
38
|
+
return;
|
|
39
|
+
}
|
|
40
|
+
|
|
41
|
+
const files = walkMarkdownFiles(explorationsDir);
|
|
42
|
+
|
|
43
|
+
for (const filePath of files) {
|
|
44
|
+
try {
|
|
45
|
+
const raw = readFileSync(filePath, "utf-8");
|
|
46
|
+
const mtime = statSync(filePath).mtime;
|
|
47
|
+
const timestamp = mtime.toISOString();
|
|
48
|
+
|
|
49
|
+
let content = raw;
|
|
50
|
+
let project: string | undefined;
|
|
51
|
+
let status: string | undefined;
|
|
52
|
+
|
|
53
|
+
// Extract frontmatter
|
|
54
|
+
const frontmatterMatch = raw.match(/^---\n([\s\S]*?)\n---\n/);
|
|
55
|
+
if (frontmatterMatch) {
|
|
56
|
+
const frontmatter = frontmatterMatch[1];
|
|
57
|
+
const projectMatch = frontmatter.match(/^project:\s*(.+)$/m);
|
|
58
|
+
const statusMatch = frontmatter.match(/^status:\s*(.+)$/m);
|
|
59
|
+
|
|
60
|
+
if (projectMatch) project = projectMatch[1].trim();
|
|
61
|
+
if (statusMatch) status = statusMatch[1].trim();
|
|
62
|
+
|
|
63
|
+
content = raw.slice(frontmatterMatch[0].length);
|
|
64
|
+
}
|
|
65
|
+
|
|
66
|
+
// Fallback: use parent directory name as project
|
|
67
|
+
if (!project) {
|
|
68
|
+
project = basename(dirname(filePath));
|
|
69
|
+
}
|
|
70
|
+
|
|
71
|
+
const metadata: Record<string, unknown> = {};
|
|
72
|
+
if (status) metadata.status = status;
|
|
73
|
+
|
|
74
|
+
const title = basename(filePath, ".md");
|
|
75
|
+
|
|
76
|
+
ctx.insert({
|
|
77
|
+
source: "explorations",
|
|
78
|
+
title: `[exploration] ${title}`,
|
|
79
|
+
content,
|
|
80
|
+
topic: project,
|
|
81
|
+
timestamp,
|
|
82
|
+
metadata: Object.keys(metadata).length > 0 ? metadata : undefined,
|
|
83
|
+
});
|
|
84
|
+
} catch (e) {
|
|
85
|
+
console.warn(`Failed to read ${filePath}: ${e}`);
|
|
86
|
+
continue;
|
|
87
|
+
}
|
|
88
|
+
}
|
|
89
|
+
}
|
|
@@ -0,0 +1,142 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* lib/indexers/flux.ts - Flux indexer
|
|
3
|
+
*
|
|
4
|
+
* Scans flux markdown files for todo and idea entries.
|
|
5
|
+
* Two passes: general flux (no project) and per-project flux.
|
|
6
|
+
* Line format: `- todo:: description id::xxx captured::date`
|
|
7
|
+
* `- idea:: description id::xxx`
|
|
8
|
+
*
|
|
9
|
+
* Source: flux
|
|
10
|
+
* Topic: project directory name, or "general" for non-project items
|
|
11
|
+
* Type: todo or idea
|
|
12
|
+
* Timestamp: captured date if present, otherwise empty
|
|
13
|
+
*/
|
|
14
|
+
|
|
15
|
+
import { readdirSync, readFileSync, existsSync } from "fs";
|
|
16
|
+
import { join, basename } from "path";
|
|
17
|
+
import type { IndexerContext } from "../indexer";
|
|
18
|
+
|
|
19
|
+
export async function indexFlux(ctx: IndexerContext): Promise<void> {
|
|
20
|
+
const fluxDir = ctx.config.paths.flux;
|
|
21
|
+
const fluxProjectsDir = ctx.config.paths.flux_projects;
|
|
22
|
+
let found = false;
|
|
23
|
+
|
|
24
|
+
// Pass 1: General flux files (no project association)
|
|
25
|
+
if (fluxDir && existsSync(fluxDir)) {
|
|
26
|
+
found = true;
|
|
27
|
+
const files = readdirSync(fluxDir).filter((f) => f.endsWith(".md"));
|
|
28
|
+
for (const file of files) {
|
|
29
|
+
const filePath = join(fluxDir, file);
|
|
30
|
+
const status = statusFromFilename(basename(file, ".md"));
|
|
31
|
+
try {
|
|
32
|
+
parseFluxFile(ctx, filePath, "general", status);
|
|
33
|
+
} catch (e) {
|
|
34
|
+
console.warn(`Failed to read ${filePath}: ${e}`);
|
|
35
|
+
}
|
|
36
|
+
}
|
|
37
|
+
}
|
|
38
|
+
|
|
39
|
+
// Pass 2: Per-project flux files (active.md, later.md)
|
|
40
|
+
if (fluxProjectsDir && existsSync(fluxProjectsDir)) {
|
|
41
|
+
found = true;
|
|
42
|
+
const projects = readdirSync(fluxProjectsDir, { withFileTypes: true })
|
|
43
|
+
.filter((d) => d.isDirectory())
|
|
44
|
+
.map((d) => d.name);
|
|
45
|
+
|
|
46
|
+
for (const project of projects) {
|
|
47
|
+
for (const filename of ["active.md", "later.md"]) {
|
|
48
|
+
const filePath = join(fluxProjectsDir, project, filename);
|
|
49
|
+
if (!existsSync(filePath)) continue;
|
|
50
|
+
const status = statusFromFilename(basename(filename, ".md"));
|
|
51
|
+
try {
|
|
52
|
+
parseFluxFile(ctx, filePath, project, status);
|
|
53
|
+
} catch (e) {
|
|
54
|
+
console.warn(`Failed to read ${filePath}: ${e}`);
|
|
55
|
+
}
|
|
56
|
+
}
|
|
57
|
+
}
|
|
58
|
+
}
|
|
59
|
+
|
|
60
|
+
if (!found) {
|
|
61
|
+
console.log("No flux directories found, skipping flux");
|
|
62
|
+
}
|
|
63
|
+
}
|
|
64
|
+
|
|
65
|
+
function statusFromFilename(name: string): string {
|
|
66
|
+
switch (name) {
|
|
67
|
+
case "active":
|
|
68
|
+
return "active";
|
|
69
|
+
case "later":
|
|
70
|
+
return "later";
|
|
71
|
+
case "inbox":
|
|
72
|
+
return "inbox";
|
|
73
|
+
default:
|
|
74
|
+
return "other";
|
|
75
|
+
}
|
|
76
|
+
}
|
|
77
|
+
|
|
78
|
+
function parseFluxFile(
|
|
79
|
+
ctx: IndexerContext,
|
|
80
|
+
filePath: string,
|
|
81
|
+
topic: string,
|
|
82
|
+
status: string,
|
|
83
|
+
): void {
|
|
84
|
+
const raw = readFileSync(filePath, "utf-8");
|
|
85
|
+
const lines = raw.split("\n");
|
|
86
|
+
|
|
87
|
+
for (const line of lines) {
|
|
88
|
+
const match = line.match(/^- (todo|idea):: (.+)/);
|
|
89
|
+
if (!match) continue;
|
|
90
|
+
|
|
91
|
+
const type = match[1];
|
|
92
|
+
let rest = match[2];
|
|
93
|
+
|
|
94
|
+
// Extract id if present
|
|
95
|
+
let id = "";
|
|
96
|
+
const idMatch = rest.match(/\bid::(\S+)/);
|
|
97
|
+
if (idMatch) {
|
|
98
|
+
id = idMatch[1];
|
|
99
|
+
rest = rest.replace(/\s*id::\S+/, "");
|
|
100
|
+
}
|
|
101
|
+
|
|
102
|
+
// Extract captured date if present (may include time: captured:: 2025-08-13 10:52)
|
|
103
|
+
let timestamp = "";
|
|
104
|
+
const capturedMatch = rest.match(
|
|
105
|
+
/\bcaptured::\s*(\d{4}-\d{2}-\d{2})(?:\s+\d{2}:\d{2})?/,
|
|
106
|
+
);
|
|
107
|
+
if (capturedMatch) {
|
|
108
|
+
timestamp = `${capturedMatch[1]}T00:00:00Z`;
|
|
109
|
+
rest = rest.replace(
|
|
110
|
+
/\s*captured::\s*\d{4}-\d{2}-\d{2}(?:\s+\d{2}:\d{2})?/,
|
|
111
|
+
"",
|
|
112
|
+
);
|
|
113
|
+
}
|
|
114
|
+
|
|
115
|
+
// Extract archived date if present (strip from description)
|
|
116
|
+
rest = rest.replace(/\s*archived::\s*\S+/, "");
|
|
117
|
+
|
|
118
|
+
// Extract any remaining key::value pairs (like last::date)
|
|
119
|
+
rest = rest.replace(/\s*\w+::\s*\S+/g, "");
|
|
120
|
+
|
|
121
|
+
const description = rest.trim();
|
|
122
|
+
if (!description) continue;
|
|
123
|
+
|
|
124
|
+
const title =
|
|
125
|
+
topic !== "general"
|
|
126
|
+
? `[${topic}] [${type}] ${description.slice(0, 80)}`
|
|
127
|
+
: `[${type}] ${description.slice(0, 80)}`;
|
|
128
|
+
|
|
129
|
+
ctx.insert({
|
|
130
|
+
source: "flux",
|
|
131
|
+
title,
|
|
132
|
+
content: description,
|
|
133
|
+
topic,
|
|
134
|
+
type,
|
|
135
|
+
timestamp,
|
|
136
|
+
metadata: {
|
|
137
|
+
id,
|
|
138
|
+
status,
|
|
139
|
+
},
|
|
140
|
+
});
|
|
141
|
+
}
|
|
142
|
+
}
|
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* lib/indexers/index.ts - Indexer registry
|
|
3
|
+
*
|
|
4
|
+
* Maps source names to indexer functions.
|
|
5
|
+
 * Add an entry here whenever a new source indexer is introduced.
|
|
6
|
+
*/
|
|
7
|
+
|
|
8
|
+
import type { IndexerFunction } from "../indexer";
|
|
9
|
+
import { indexEvents } from "./events";
|
|
10
|
+
import { indexLearnings } from "./learnings";
|
|
11
|
+
import { indexReadmes } from "./readmes";
|
|
12
|
+
import { indexDevelopment } from "./development";
|
|
13
|
+
import { indexCaptures } from "./captures";
|
|
14
|
+
import { indexTeachings } from "./teachings";
|
|
15
|
+
import { indexInsights } from "./insights";
|
|
16
|
+
import { indexObservations } from "./observations";
|
|
17
|
+
import { indexExplorations } from "./explorations";
|
|
18
|
+
import { indexSessions } from "./sessions";
|
|
19
|
+
import { indexFlux } from "./flux";
|
|
20
|
+
import { indexObsidian } from "./obsidian";
|
|
21
|
+
import { indexCommits } from "./commits";
|
|
22
|
+
import { indexBlogs } from "./blogs";
|
|
23
|
+
import { indexPersonal } from "./personal";
|
|
24
|
+
|
|
25
|
+
/**
 * Registry mapping each source name to its indexer function.
 * Keys are the source identifiers used throughout the CLI; values are the
 * indexer implementations imported above. Add new indexers here.
 */
export const indexers: Record<string, IndexerFunction> = {
  events: indexEvents,
  learnings: indexLearnings,
  readmes: indexReadmes,
  development: indexDevelopment,
  captures: indexCaptures,
  teachings: indexTeachings,
  insights: indexInsights,
  observations: indexObservations,
  explorations: indexExplorations,
  sessions: indexSessions,
  flux: indexFlux,
  obsidian: indexObsidian,
  commits: indexCommits,
  blogs: indexBlogs,
  personal: indexPersonal,
};
|
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* lib/indexers/insights.ts - Insights indexer
|
|
3
|
+
*
|
|
4
|
+
* Reads log.jsonl and indexes insight summary captures.
|
|
5
|
+
* Filters for event=captured AND type=insight AND data.subtype=summary.
|
|
6
|
+
*
|
|
7
|
+
* Source: insights
|
|
8
|
+
* Topic: data.topic or "assistant"
|
|
9
|
+
* Type: summary (fixed)
|
|
10
|
+
* Timestamp: event timestamp
|
|
11
|
+
*/
|
|
12
|
+
|
|
13
|
+
import { readFileSync, existsSync } from "fs";
|
|
14
|
+
import type { IndexerContext } from "../indexer";
|
|
15
|
+
|
|
16
|
+
export async function indexInsights(ctx: IndexerContext): Promise<void> {
|
|
17
|
+
const logPath = `${ctx.config.paths.data}/log.jsonl`;
|
|
18
|
+
if (!existsSync(logPath)) {
|
|
19
|
+
console.log("No log.jsonl found, skipping insights");
|
|
20
|
+
return;
|
|
21
|
+
}
|
|
22
|
+
|
|
23
|
+
const lines = readFileSync(logPath, "utf-8").split("\n").filter(Boolean);
|
|
24
|
+
|
|
25
|
+
for (const line of lines) {
|
|
26
|
+
try {
|
|
27
|
+
const event = JSON.parse(line);
|
|
28
|
+
if (event.event !== "captured" || event.type !== "insight") continue;
|
|
29
|
+
if (event.data?.subtype !== "summary") continue;
|
|
30
|
+
|
|
31
|
+
const topic = event.data?.topic || "assistant";
|
|
32
|
+
const content = event.data?.content || "";
|
|
33
|
+
const sessionId = event.data?.session_id;
|
|
34
|
+
|
|
35
|
+
if (!content) continue;
|
|
36
|
+
|
|
37
|
+
const metadata: Record<string, unknown> = {};
|
|
38
|
+
if (sessionId) metadata.session_id = sessionId;
|
|
39
|
+
|
|
40
|
+
ctx.insert({
|
|
41
|
+
source: "insights",
|
|
42
|
+
title: `[summary] ${topic}`,
|
|
43
|
+
content,
|
|
44
|
+
topic,
|
|
45
|
+
type: "summary",
|
|
46
|
+
timestamp: event.timestamp,
|
|
47
|
+
metadata: Object.keys(metadata).length > 0 ? metadata : undefined,
|
|
48
|
+
});
|
|
49
|
+
} catch (e) {
|
|
50
|
+
continue;
|
|
51
|
+
}
|
|
52
|
+
}
|
|
53
|
+
}
|
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* lib/indexers/learnings.ts - Learnings indexer
|
|
3
|
+
*
|
|
4
|
+
* Reads log.jsonl and indexes learning captures.
|
|
5
|
+
* Filters for event=captured AND type=learning.
|
|
6
|
+
*
|
|
7
|
+
* Source: learnings
|
|
8
|
+
* Topic: data.topic
|
|
9
|
+
* Type: (empty)
|
|
10
|
+
* Timestamp: event timestamp
|
|
11
|
+
*/
|
|
12
|
+
|
|
13
|
+
import { readFileSync, existsSync } from "fs";
|
|
14
|
+
import type { IndexerContext } from "../indexer";
|
|
15
|
+
|
|
16
|
+
export async function indexLearnings(ctx: IndexerContext): Promise<void> {
|
|
17
|
+
const logPath = `${ctx.config.paths.data}/log.jsonl`;
|
|
18
|
+
|
|
19
|
+
if (!existsSync(logPath)) {
|
|
20
|
+
console.log("No log.jsonl found, skipping learnings");
|
|
21
|
+
return;
|
|
22
|
+
}
|
|
23
|
+
|
|
24
|
+
const lines = readFileSync(logPath, "utf-8").split("\n").filter(Boolean);
|
|
25
|
+
|
|
26
|
+
for (const line of lines) {
|
|
27
|
+
try {
|
|
28
|
+
const event = JSON.parse(line);
|
|
29
|
+
if (event.event !== "captured" || event.type !== "learning") continue;
|
|
30
|
+
|
|
31
|
+
const topic = event.data?.topic || "general";
|
|
32
|
+
const content = event.data?.content || "";
|
|
33
|
+
const persona = event.data?.persona;
|
|
34
|
+
|
|
35
|
+
if (!content) continue;
|
|
36
|
+
|
|
37
|
+
const metadata: Record<string, unknown> = {};
|
|
38
|
+
if (persona) metadata.persona = persona;
|
|
39
|
+
|
|
40
|
+
ctx.insert({
|
|
41
|
+
source: "learnings",
|
|
42
|
+
title: `[learning] ${topic}`,
|
|
43
|
+
content,
|
|
44
|
+
topic,
|
|
45
|
+
timestamp: event.timestamp || "",
|
|
46
|
+
metadata: Object.keys(metadata).length > 0 ? metadata : undefined,
|
|
47
|
+
});
|
|
48
|
+
} catch {
|
|
49
|
+
// Skip malformed JSON
|
|
50
|
+
continue;
|
|
51
|
+
}
|
|
52
|
+
}
|
|
53
|
+
}
|
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* lib/indexers/observations.ts - Observations indexer
|
|
3
|
+
*
|
|
4
|
+
* Reads log.jsonl and indexes observation captures.
|
|
5
|
+
* Filters for event=captured AND type=observation.
|
|
6
|
+
*
|
|
7
|
+
* Source: observations
|
|
8
|
+
* Topic: data.topic (AI-written)
|
|
9
|
+
* Type: data.subtype or "pattern" (default)
|
|
10
|
+
* Timestamp: event timestamp
|
|
11
|
+
*/
|
|
12
|
+
|
|
13
|
+
import { readFileSync, existsSync } from "fs";
|
|
14
|
+
import type { IndexerContext } from "../indexer";
|
|
15
|
+
|
|
16
|
+
export async function indexObservations(ctx: IndexerContext): Promise<void> {
|
|
17
|
+
const logPath = `${ctx.config.paths.data}/log.jsonl`;
|
|
18
|
+
if (!existsSync(logPath)) {
|
|
19
|
+
console.log("No log.jsonl found, skipping observations");
|
|
20
|
+
return;
|
|
21
|
+
}
|
|
22
|
+
|
|
23
|
+
const lines = readFileSync(logPath, "utf-8").split("\n").filter(Boolean);
|
|
24
|
+
|
|
25
|
+
for (const line of lines) {
|
|
26
|
+
try {
|
|
27
|
+
const event = JSON.parse(line);
|
|
28
|
+
if (event.event !== "captured" || event.type !== "observation") continue;
|
|
29
|
+
|
|
30
|
+
const topic = event.data?.topic || "general";
|
|
31
|
+
const content = event.data?.content || "";
|
|
32
|
+
const subtype = event.data?.subtype || "pattern";
|
|
33
|
+
const confidence = event.data?.confidence;
|
|
34
|
+
|
|
35
|
+
if (!content) continue;
|
|
36
|
+
|
|
37
|
+
const metadata: Record<string, unknown> = {};
|
|
38
|
+
if (confidence) metadata.confidence = confidence;
|
|
39
|
+
|
|
40
|
+
ctx.insert({
|
|
41
|
+
source: "observations",
|
|
42
|
+
title: `[${subtype}] ${topic}`,
|
|
43
|
+
content,
|
|
44
|
+
topic,
|
|
45
|
+
type: subtype,
|
|
46
|
+
timestamp: event.timestamp,
|
|
47
|
+
metadata: Object.keys(metadata).length > 0 ? metadata : undefined,
|
|
48
|
+
});
|
|
49
|
+
} catch (e) {
|
|
50
|
+
continue;
|
|
51
|
+
}
|
|
52
|
+
}
|
|
53
|
+
}
|