@voidwire/lore 0.9.0 → 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/cli.ts +66 -1
- package/lib/config.ts +134 -0
- package/lib/indexer.ts +213 -0
- package/lib/indexers/blogs.ts +146 -0
- package/lib/indexers/captures.ts +105 -0
- package/lib/indexers/commits.ts +90 -0
- package/lib/indexers/development.ts +68 -0
- package/lib/indexers/events.ts +61 -0
- package/lib/indexers/explorations.ts +89 -0
- package/lib/indexers/flux.ts +142 -0
- package/lib/indexers/index.ts +41 -0
- package/lib/indexers/insights.ts +53 -0
- package/lib/indexers/learnings.ts +53 -0
- package/lib/indexers/observations.ts +53 -0
- package/lib/indexers/obsidian.ts +151 -0
- package/lib/indexers/personal.ts +262 -0
- package/lib/indexers/readmes.ts +49 -0
- package/lib/indexers/sessions.ts +127 -0
- package/lib/indexers/teachings.ts +52 -0
- package/lib/info.ts +3 -3
- package/lib/list.ts +23 -32
- package/lib/projects.ts +26 -31
- package/lib/realtime.ts +51 -19
- package/lib/search.ts +5 -7
- package/lib/semantic.ts +6 -31
- package/package.json +9 -8
|
@@ -0,0 +1,151 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* lib/indexers/obsidian.ts - Obsidian vault indexer
|
|
3
|
+
*
|
|
4
|
+
* Recursively scans obsidian directory for markdown files.
|
|
5
|
+
* Extracts project, domain, and status from frontmatter.
|
|
6
|
+
* Skips personal subdirectory for privacy.
|
|
7
|
+
*
|
|
8
|
+
* Source: obsidian
|
|
9
|
+
* Topic: frontmatter project > domain > parent directory name > empty
|
|
10
|
+
* Type: (empty)
|
|
11
|
+
* Timestamp: file mtime as ISO 8601
|
|
12
|
+
*/
|
|
13
|
+
|
|
14
|
+
import { readdirSync, readFileSync, statSync, existsSync } from "fs";
|
|
15
|
+
import { join, basename, dirname } from "path";
|
|
16
|
+
import type { IndexerContext } from "../indexer";
|
|
17
|
+
|
|
18
|
+
function walkMarkdownFiles(
|
|
19
|
+
dir: string,
|
|
20
|
+
rootDir: string,
|
|
21
|
+
files: string[] = [],
|
|
22
|
+
): string[] {
|
|
23
|
+
const entries = readdirSync(dir, { withFileTypes: true });
|
|
24
|
+
|
|
25
|
+
for (const entry of entries) {
|
|
26
|
+
const fullPath = join(dir, entry.name);
|
|
27
|
+
|
|
28
|
+
if (entry.isDirectory()) {
|
|
29
|
+
// Skip personal directory (privacy filter)
|
|
30
|
+
if (entry.name === "personal") continue;
|
|
31
|
+
|
|
32
|
+
walkMarkdownFiles(fullPath, rootDir, files);
|
|
33
|
+
} else if (entry.isFile() && entry.name.endsWith(".md")) {
|
|
34
|
+
files.push(fullPath);
|
|
35
|
+
}
|
|
36
|
+
}
|
|
37
|
+
|
|
38
|
+
return files;
|
|
39
|
+
}
|
|
40
|
+
|
|
41
|
+
export async function indexObsidian(ctx: IndexerContext): Promise<void> {
|
|
42
|
+
const obsidianDir = ctx.config.paths.obsidian;
|
|
43
|
+
|
|
44
|
+
if (!existsSync(obsidianDir)) {
|
|
45
|
+
console.log(`Obsidian directory not found: ${obsidianDir}`);
|
|
46
|
+
return;
|
|
47
|
+
}
|
|
48
|
+
|
|
49
|
+
const files = walkMarkdownFiles(obsidianDir, obsidianDir);
|
|
50
|
+
|
|
51
|
+
for (const filePath of files) {
|
|
52
|
+
try {
|
|
53
|
+
const raw = readFileSync(filePath, "utf-8");
|
|
54
|
+
const mtime = statSync(filePath).mtime;
|
|
55
|
+
|
|
56
|
+
let content = raw;
|
|
57
|
+
let project: string | undefined;
|
|
58
|
+
let domain: string | undefined;
|
|
59
|
+
let status: string | undefined;
|
|
60
|
+
let fmDate: string | undefined;
|
|
61
|
+
let fmStarted: string | undefined;
|
|
62
|
+
|
|
63
|
+
// Extract frontmatter
|
|
64
|
+
const frontmatterMatch = raw.match(/^---\n([\s\S]*?)\n---\n/);
|
|
65
|
+
if (frontmatterMatch) {
|
|
66
|
+
const frontmatter = frontmatterMatch[1];
|
|
67
|
+
|
|
68
|
+
// Skip private notes
|
|
69
|
+
const privateMatch = frontmatter.match(/^private:\s*(true|yes)$/m);
|
|
70
|
+
if (privateMatch) continue;
|
|
71
|
+
|
|
72
|
+
const projectMatch = frontmatter.match(/^project:\s*(.+)$/m);
|
|
73
|
+
const domainMatch = frontmatter.match(/^domain:\s*(.+)$/m);
|
|
74
|
+
const statusMatch = frontmatter.match(/^status:\s*(.+)$/m);
|
|
75
|
+
const dateMatch = frontmatter.match(/^date:\s*(.+)$/m);
|
|
76
|
+
const startedMatch = frontmatter.match(/^started:\s*(.+)$/m);
|
|
77
|
+
|
|
78
|
+
const tagsMatch = frontmatter.match(/^tags:\s*\[(.+)\]$/m);
|
|
79
|
+
const tagsMultiMatch = !tagsMatch
|
|
80
|
+
? frontmatter.match(/^tags:\s*\n((?:\s+-\s+.+\n?)+)/m)
|
|
81
|
+
: null;
|
|
82
|
+
|
|
83
|
+
if (projectMatch) project = projectMatch[1].trim();
|
|
84
|
+
if (domainMatch) domain = domainMatch[1].trim();
|
|
85
|
+
if (statusMatch) status = statusMatch[1].trim();
|
|
86
|
+
if (dateMatch) fmDate = dateMatch[1].trim();
|
|
87
|
+
if (startedMatch) fmStarted = startedMatch[1].trim();
|
|
88
|
+
|
|
89
|
+
let tags: string[] = [];
|
|
90
|
+
if (tagsMatch) {
|
|
91
|
+
tags = tagsMatch[1].split(",").map((t) => t.trim().replace(/"/g, ""));
|
|
92
|
+
} else if (tagsMultiMatch) {
|
|
93
|
+
tags = tagsMultiMatch[1]
|
|
94
|
+
.split("\n")
|
|
95
|
+
.map((l) => l.replace(/^\s+-\s+/, "").trim())
|
|
96
|
+
.filter(Boolean);
|
|
97
|
+
}
|
|
98
|
+
|
|
99
|
+
content = raw.slice(frontmatterMatch[0].length);
|
|
100
|
+
|
|
101
|
+
if (tags.length > 0) {
|
|
102
|
+
content += `\nTags: ${tags.join(", ")}`;
|
|
103
|
+
}
|
|
104
|
+
}
|
|
105
|
+
|
|
106
|
+
// Topic derivation: project > domain > parent directory > empty
|
|
107
|
+
let topic = "";
|
|
108
|
+
if (project) {
|
|
109
|
+
topic = project;
|
|
110
|
+
} else if (domain) {
|
|
111
|
+
topic = domain;
|
|
112
|
+
} else {
|
|
113
|
+
const parentDir = basename(dirname(filePath));
|
|
114
|
+
if (parentDir !== basename(obsidianDir)) {
|
|
115
|
+
topic = parentDir;
|
|
116
|
+
}
|
|
117
|
+
}
|
|
118
|
+
|
|
119
|
+
// Timestamp cascade: frontmatter date > started > file mtime
|
|
120
|
+
let timestamp: string;
|
|
121
|
+
if (fmDate) {
|
|
122
|
+
timestamp = fmDate.includes("T")
|
|
123
|
+
? fmDate
|
|
124
|
+
: `${fmDate.slice(0, 10)}T00:00:00Z`;
|
|
125
|
+
} else if (fmStarted) {
|
|
126
|
+
timestamp = fmStarted.includes("T")
|
|
127
|
+
? fmStarted
|
|
128
|
+
: `${fmStarted.slice(0, 10)}T00:00:00Z`;
|
|
129
|
+
} else {
|
|
130
|
+
timestamp = mtime.toISOString();
|
|
131
|
+
}
|
|
132
|
+
|
|
133
|
+
const metadata: Record<string, unknown> = {};
|
|
134
|
+
if (status) metadata.status = status;
|
|
135
|
+
|
|
136
|
+
const title = basename(filePath, ".md");
|
|
137
|
+
|
|
138
|
+
ctx.insert({
|
|
139
|
+
source: "obsidian",
|
|
140
|
+
title,
|
|
141
|
+
content,
|
|
142
|
+
topic,
|
|
143
|
+
timestamp,
|
|
144
|
+
metadata: Object.keys(metadata).length > 0 ? metadata : undefined,
|
|
145
|
+
});
|
|
146
|
+
} catch (e) {
|
|
147
|
+
console.warn(`Failed to read ${filePath}: ${e}`);
|
|
148
|
+
continue;
|
|
149
|
+
}
|
|
150
|
+
}
|
|
151
|
+
}
|
|
@@ -0,0 +1,262 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* lib/indexers/personal.ts - Personal data indexer
|
|
3
|
+
*
|
|
4
|
+
* Reads JSON files from the personal data directory and indexes
|
|
5
|
+
* 8 types: book, person, movie, podcast, interest, habit, profile, preference.
|
|
6
|
+
*
|
|
7
|
+
* Source: personal
|
|
8
|
+
* Topic: (empty - type handles categorization)
|
|
9
|
+
* Type: derived from JSON filename
|
|
10
|
+
* Timestamp: file mtime, or date_read/date_watched when available
|
|
11
|
+
*/
|
|
12
|
+
|
|
13
|
+
import { readFileSync, statSync, existsSync } from "fs";
|
|
14
|
+
import { join } from "path";
|
|
15
|
+
import type { IndexerContext } from "../indexer";
|
|
16
|
+
|
|
17
|
+
function fileMtime(path: string): string {
|
|
18
|
+
return statSync(path).mtime.toISOString();
|
|
19
|
+
}
|
|
20
|
+
|
|
21
|
+
function toISO(dateStr: string, fallback: string): string {
|
|
22
|
+
if (!dateStr) return fallback;
|
|
23
|
+
const s = String(dateStr);
|
|
24
|
+
return s.includes("T") ? s : `${s.slice(0, 10)}T00:00:00Z`;
|
|
25
|
+
}
|
|
26
|
+
|
|
27
|
+
/**
 * Indexes personal data from JSON files in the configured personal dir.
 *
 * Eight record types are produced, each inserted with source "personal"
 * and an empty topic (the `type` field carries the categorization):
 * book, person, movie, podcast, interest, habit, profile, preference.
 *
 * Timestamps default to each JSON file's mtime; books and movies prefer
 * their per-record date_read / date_watched when present.
 *
 * Each file is handled independently: a parse or insert failure inside one
 * section is caught by that section's try/catch (which wraps the insert
 * loop) and logged, without stopping the other sections.
 */
export async function indexPersonal(ctx: IndexerContext): Promise<void> {
  const personalDir = ctx.config.paths.personal;

  if (!existsSync(personalDir)) {
    console.log(`Personal data directory not found: ${personalDir}`);
    return;
  }

  // Books: array of objects reading title, author, notes, date_read.
  const booksPath = join(personalDir, "books.json");
  if (existsSync(booksPath)) {
    try {
      const booksTs = fileMtime(booksPath);
      const books = JSON.parse(readFileSync(booksPath, "utf-8"));
      for (const book of books) {
        // Title is required; untitled records are skipped.
        if (!book.title) continue;
        const content = `${book.title} by ${book.author || "unknown"}\n${book.notes || ""}`;
        // Prefer the date the book was read over the file mtime.
        const timestamp = book.date_read
          ? toISO(book.date_read, booksTs)
          : booksTs;

        ctx.insert({
          source: "personal",
          title: `[book] ${book.title}`,
          content,
          topic: "",
          type: "book",
          timestamp,
          metadata: { name: book.title },
        });
      }
    } catch (e) {
      console.warn(`Failed to read books.json: ${e}`);
    }
  }

  // People: array of objects reading name, relationship, notes.
  const peoplePath = join(personalDir, "people.json");
  if (existsSync(peoplePath)) {
    try {
      const peopleTs = fileMtime(peoplePath);
      const people = JSON.parse(readFileSync(peoplePath, "utf-8"));
      for (const person of people) {
        // Name is required; unnamed records are skipped.
        if (!person.name) continue;
        const content = `${person.name}\n${person.relationship || ""}\n${person.notes || ""}`;

        ctx.insert({
          source: "personal",
          title: `[person] ${person.name}`,
          content,
          topic: "",
          type: "person",
          timestamp: peopleTs,
          metadata: { name: person.name },
        });
      }
    } catch (e) {
      console.warn(`Failed to read people.json: ${e}`);
    }
  }

  // Movies: array of objects reading title, year, notes, date_watched.
  const moviesPath = join(personalDir, "movies.json");
  if (existsSync(moviesPath)) {
    try {
      const moviesTs = fileMtime(moviesPath);
      const movies = JSON.parse(readFileSync(moviesPath, "utf-8"));
      for (const movie of movies) {
        if (!movie.title) continue;
        const year = movie.year || "";
        // Include the release year in the content only when known.
        const content = year
          ? `${movie.title} (${year})\n${movie.notes || ""}`
          : `${movie.title}\n${movie.notes || ""}`;
        // Prefer the watch date over the file mtime.
        const timestamp = movie.date_watched
          ? toISO(movie.date_watched, moviesTs)
          : moviesTs;

        ctx.insert({
          source: "personal",
          title: `[movie] ${movie.title}`,
          content,
          topic: "",
          type: "movie",
          timestamp,
          metadata: { name: movie.title },
        });
      }
    } catch (e) {
      console.warn(`Failed to read movies.json: ${e}`);
    }
  }

  // Podcasts: the name field is "title", not "name"; also reads description.
  const podcastsPath = join(personalDir, "podcasts.json");
  if (existsSync(podcastsPath)) {
    try {
      const podcastsTs = fileMtime(podcastsPath);
      const podcasts = JSON.parse(readFileSync(podcastsPath, "utf-8"));
      for (const podcast of podcasts) {
        const name = podcast.title || "";
        if (!name) continue;
        const content = `${name}\n${podcast.description || ""}`;

        ctx.insert({
          source: "personal",
          title: `[podcast] ${name}`,
          content,
          topic: "",
          type: "podcast",
          timestamp: podcastsTs,
          metadata: { name },
        });
      }
    } catch (e) {
      console.warn(`Failed to read podcasts.json: ${e}`);
    }
  }

  // Interests: array of plain strings, not objects.
  const interestsPath = join(personalDir, "interests.json");
  if (existsSync(interestsPath)) {
    try {
      const interestsTs = fileMtime(interestsPath);
      const interests = JSON.parse(readFileSync(interestsPath, "utf-8"));
      for (const interest of interests) {
        // Only non-empty strings are indexed.
        if (typeof interest !== "string" || !interest) continue;

        ctx.insert({
          source: "personal",
          title: `[interest] ${interest}`,
          content: interest,
          topic: "",
          type: "interest",
          timestamp: interestsTs,
          metadata: { name: interest },
        });
      }
    } catch (e) {
      console.warn(`Failed to read interests.json: ${e}`);
    }
  }

  // Habits: the name field is "habit" (not "name") and the detail field is
  // "frequency" (not "description").
  const habitsPath = join(personalDir, "habits.json");
  if (existsSync(habitsPath)) {
    try {
      const habitsTs = fileMtime(habitsPath);
      const habits = JSON.parse(readFileSync(habitsPath, "utf-8"));
      for (const habit of habits) {
        const habitName = habit.habit || "";
        if (!habitName) continue;
        const frequency = habit.frequency || "";
        // Frequency is shown in parentheses only when present.
        const content = frequency ? `${habitName} (${frequency})` : habitName;

        ctx.insert({
          source: "personal",
          title: `[habit] ${habitName}`,
          content,
          topic: "",
          type: "habit",
          timestamp: habitsTs,
          metadata: { name: habitName },
        });
      }
    } catch (e) {
      console.warn(`Failed to read habits.json: ${e}`);
    }
  }

  // Profile: a single object; one indexed entry per key/value field.
  const profilePath = join(personalDir, "profile.json");
  if (existsSync(profilePath)) {
    try {
      const profileTs = fileMtime(profilePath);
      const profile = JSON.parse(readFileSync(profilePath, "utf-8"));
      for (const [key, value] of Object.entries(profile)) {
        // Null/undefined fields are skipped; other falsy values (0, "") are kept.
        if (value === null || value === undefined) continue;
        const content = `${key}: ${String(value)}`;

        ctx.insert({
          source: "personal",
          title: `[profile] ${key}`,
          content,
          topic: "",
          type: "profile",
          timestamp: profileTs,
          metadata: { name: key },
        });
      }
    } catch (e) {
      console.warn(`Failed to read profile.json: ${e}`);
    }
  }

  // Preferences: object of categories. Nested object values yield one entry
  // per category/key pair; scalar values yield one entry per category.
  const preferencesPath = join(personalDir, "preferences.json");
  if (existsSync(preferencesPath)) {
    try {
      const prefsTs = fileMtime(preferencesPath);
      const preferences = JSON.parse(readFileSync(preferencesPath, "utf-8"));
      for (const [category, settings] of Object.entries(preferences)) {
        if (typeof settings === "object" && settings !== null) {
          for (const [key, value] of Object.entries(
            settings as Record<string, unknown>,
          )) {
            const content = `${category}/${key}: ${String(value)}`;

            ctx.insert({
              source: "personal",
              title: `[preference] ${category}/${key}`,
              content,
              topic: "",
              type: "preference",
              timestamp: prefsTs,
              metadata: { name: `${category}/${key}` },
            });
          }
        } else {
          const content = `${category}: ${String(settings)}`;

          ctx.insert({
            source: "personal",
            title: `[preference] ${category}`,
            content,
            topic: "",
            type: "preference",
            timestamp: prefsTs,
            metadata: { name: category },
          });
        }
      }
    } catch (e) {
      console.warn(`Failed to read preferences.json: ${e}`);
    }
  }
}
|
|
@@ -0,0 +1,49 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* lib/indexers/readmes.ts - READMEs indexer
|
|
3
|
+
*
|
|
4
|
+
* Scans project directories for README.md and indexes content.
|
|
5
|
+
* Framework handles chunking for large READMEs.
|
|
6
|
+
*
|
|
7
|
+
* Source: readmes, Topic: project directory name,
|
|
8
|
+
* Type: empty, Timestamp: file mtime as ISO 8601
|
|
9
|
+
*/
|
|
10
|
+
|
|
11
|
+
import { readdirSync, readFileSync, statSync, existsSync } from "fs";
|
|
12
|
+
import { join } from "path";
|
|
13
|
+
import type { IndexerContext } from "../indexer";
|
|
14
|
+
|
|
15
|
+
export async function indexReadmes(ctx: IndexerContext): Promise<void> {
|
|
16
|
+
const projectsDir = ctx.config.paths.projects;
|
|
17
|
+
|
|
18
|
+
if (!existsSync(projectsDir)) {
|
|
19
|
+
console.log(`Projects directory not found: ${projectsDir}`);
|
|
20
|
+
return;
|
|
21
|
+
}
|
|
22
|
+
|
|
23
|
+
const projects = readdirSync(projectsDir, { withFileTypes: true })
|
|
24
|
+
.filter((dirent) => dirent.isDirectory())
|
|
25
|
+
.map((dirent) => dirent.name);
|
|
26
|
+
|
|
27
|
+
for (const project of projects) {
|
|
28
|
+
const readmePath = join(projectsDir, project, "README.md");
|
|
29
|
+
|
|
30
|
+
if (!existsSync(readmePath)) continue;
|
|
31
|
+
|
|
32
|
+
try {
|
|
33
|
+
const content = readFileSync(readmePath, "utf-8");
|
|
34
|
+
const mtime = statSync(readmePath).mtime;
|
|
35
|
+
const timestamp = mtime.toISOString();
|
|
36
|
+
|
|
37
|
+
ctx.insert({
|
|
38
|
+
source: "readmes",
|
|
39
|
+
title: `README: ${project}`,
|
|
40
|
+
content,
|
|
41
|
+
topic: project,
|
|
42
|
+
timestamp,
|
|
43
|
+
});
|
|
44
|
+
} catch (e) {
|
|
45
|
+
console.warn(`Failed to read ${readmePath}: ${e}`);
|
|
46
|
+
continue;
|
|
47
|
+
}
|
|
48
|
+
}
|
|
49
|
+
}
|
|
@@ -0,0 +1,127 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* lib/indexers/sessions.ts - Sessions indexer
|
|
3
|
+
*
|
|
4
|
+
* Reads session event JSONL files (hook telemetry) and aggregates by session_id.
|
|
5
|
+
* Each session gets one entry with summary, tools, model, and token counts.
|
|
6
|
+
*
|
|
7
|
+
* Source: sessions
|
|
8
|
+
* Topic: event.project (project the session was in)
|
|
9
|
+
* Type: (empty)
|
|
10
|
+
* Timestamp: first event timestamp per session
|
|
11
|
+
*/
|
|
12
|
+
|
|
13
|
+
import { readdirSync, readFileSync, existsSync } from "fs";
|
|
14
|
+
import { join } from "path";
|
|
15
|
+
import type { IndexerContext } from "../indexer";
|
|
16
|
+
|
|
17
|
+
/**
 * Aggregated per-session state built by folding hook telemetry events
 * that share a session_id.
 */
interface SessionData {
  // Project the session ran in ("unknown" when the first event lacks one).
  project: string;
  // Sum of input token counts across all events.
  totalInput: number;
  // Sum of output token counts across all events.
  totalOutput: number;
  // Distinct tool names seen in any event's tools_used list.
  toolsUsed: Set<string>;
  // First model reported by any event; null until one appears.
  model: string | null;
  // Most recent summary reported (last one wins); null if never reported.
  summary: string | null;
  // Number of events folded into this session.
  eventCount: number;
  // Timestamp of the session's first event ("" when it had no ts field).
  firstTs: string;
  // Timestamp of the latest event that carried a ts field.
  lastTs: string;
}
|
|
28
|
+
|
|
29
|
+
export async function indexSessions(ctx: IndexerContext): Promise<void> {
|
|
30
|
+
const eventsDir = ctx.config.paths.session_events;
|
|
31
|
+
if (!eventsDir || !existsSync(eventsDir)) {
|
|
32
|
+
console.log("No session events directory found, skipping sessions");
|
|
33
|
+
return;
|
|
34
|
+
}
|
|
35
|
+
|
|
36
|
+
const eventFiles = readdirSync(eventsDir)
|
|
37
|
+
.filter((f) => f.endsWith(".jsonl"))
|
|
38
|
+
.sort()
|
|
39
|
+
.map((f) => join(eventsDir, f));
|
|
40
|
+
|
|
41
|
+
if (eventFiles.length === 0) {
|
|
42
|
+
console.log("No session event files found, skipping sessions");
|
|
43
|
+
return;
|
|
44
|
+
}
|
|
45
|
+
|
|
46
|
+
const sessions = new Map<string, SessionData>();
|
|
47
|
+
|
|
48
|
+
for (const eventFile of eventFiles) {
|
|
49
|
+
try {
|
|
50
|
+
const lines = readFileSync(eventFile, "utf-8")
|
|
51
|
+
.split("\n")
|
|
52
|
+
.filter(Boolean);
|
|
53
|
+
for (const line of lines) {
|
|
54
|
+
try {
|
|
55
|
+
const event = JSON.parse(line);
|
|
56
|
+
const sessionId = event.session_id;
|
|
57
|
+
if (!sessionId) continue;
|
|
58
|
+
|
|
59
|
+
if (!sessions.has(sessionId)) {
|
|
60
|
+
sessions.set(sessionId, {
|
|
61
|
+
project: event.project || "unknown",
|
|
62
|
+
totalInput: 0,
|
|
63
|
+
totalOutput: 0,
|
|
64
|
+
toolsUsed: new Set(),
|
|
65
|
+
model: null,
|
|
66
|
+
summary: null,
|
|
67
|
+
eventCount: 0,
|
|
68
|
+
firstTs: event.ts || "",
|
|
69
|
+
lastTs: event.ts || "",
|
|
70
|
+
});
|
|
71
|
+
}
|
|
72
|
+
|
|
73
|
+
const session = sessions.get(sessionId)!;
|
|
74
|
+
session.eventCount++;
|
|
75
|
+
if (event.ts) session.lastTs = event.ts;
|
|
76
|
+
|
|
77
|
+
const data = event.data || {};
|
|
78
|
+
const tokens = data.tokens || {};
|
|
79
|
+
session.totalInput += tokens.input || 0;
|
|
80
|
+
session.totalOutput += tokens.output || 0;
|
|
81
|
+
|
|
82
|
+
const tools: string[] = data.tools_used || [];
|
|
83
|
+
for (const tool of tools) session.toolsUsed.add(tool);
|
|
84
|
+
|
|
85
|
+
if (!session.model && data.model) session.model = data.model;
|
|
86
|
+
if (data.summary) session.summary = data.summary;
|
|
87
|
+
} catch {
|
|
88
|
+
continue;
|
|
89
|
+
}
|
|
90
|
+
}
|
|
91
|
+
} catch (e) {
|
|
92
|
+
console.warn(`Failed to read ${eventFile}: ${e}`);
|
|
93
|
+
continue;
|
|
94
|
+
}
|
|
95
|
+
}
|
|
96
|
+
|
|
97
|
+
for (const [sessionId, session] of sessions) {
|
|
98
|
+
const tools = Array.from(session.toolsUsed).sort();
|
|
99
|
+
const model = session.model || "unknown";
|
|
100
|
+
const totalTokens = session.totalInput + session.totalOutput;
|
|
101
|
+
const date = session.firstTs.slice(0, 10) || "unknown";
|
|
102
|
+
|
|
103
|
+
let content: string;
|
|
104
|
+
if (session.summary) {
|
|
105
|
+
content = `${session.summary} Tools: ${tools.join(", ")}. Model: ${model}. Tokens: ${session.totalInput} input, ${session.totalOutput} output.`;
|
|
106
|
+
} else {
|
|
107
|
+
content = `Session for ${session.project} on ${date}. Tools: ${tools.join(", ")}. Model: ${model}. Events: ${session.eventCount}. Tokens: ${session.totalInput} input, ${session.totalOutput} output.`;
|
|
108
|
+
}
|
|
109
|
+
|
|
110
|
+
if (!content) continue;
|
|
111
|
+
|
|
112
|
+
ctx.insert({
|
|
113
|
+
source: "sessions",
|
|
114
|
+
title: `[session] ${session.project} (${date})`,
|
|
115
|
+
content,
|
|
116
|
+
topic: session.project,
|
|
117
|
+
timestamp: session.firstTs,
|
|
118
|
+
metadata: {
|
|
119
|
+
session_id: sessionId,
|
|
120
|
+
model,
|
|
121
|
+
tools_used: tools,
|
|
122
|
+
total_tokens: totalTokens,
|
|
123
|
+
event_count: session.eventCount,
|
|
124
|
+
},
|
|
125
|
+
});
|
|
126
|
+
}
|
|
127
|
+
}
|
|
@@ -0,0 +1,52 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* lib/indexers/teachings.ts - Teachings indexer
|
|
3
|
+
*
|
|
4
|
+
* Reads log.jsonl and indexes teaching captures.
|
|
5
|
+
* Filters for event=captured AND type=teaching.
|
|
6
|
+
*
|
|
7
|
+
* Source: teachings
|
|
8
|
+
* Topic: data.topic (AI-written)
|
|
9
|
+
* Type: teaching (fixed)
|
|
10
|
+
* Timestamp: event timestamp
|
|
11
|
+
*/
|
|
12
|
+
|
|
13
|
+
import { readFileSync, existsSync } from "fs";
|
|
14
|
+
import type { IndexerContext } from "../indexer";
|
|
15
|
+
|
|
16
|
+
export async function indexTeachings(ctx: IndexerContext): Promise<void> {
|
|
17
|
+
const logPath = `${ctx.config.paths.data}/log.jsonl`;
|
|
18
|
+
if (!existsSync(logPath)) {
|
|
19
|
+
console.log("No log.jsonl found, skipping teachings");
|
|
20
|
+
return;
|
|
21
|
+
}
|
|
22
|
+
|
|
23
|
+
const lines = readFileSync(logPath, "utf-8").split("\n").filter(Boolean);
|
|
24
|
+
|
|
25
|
+
for (const line of lines) {
|
|
26
|
+
try {
|
|
27
|
+
const event = JSON.parse(line);
|
|
28
|
+
if (event.event !== "captured" || event.type !== "teaching") continue;
|
|
29
|
+
|
|
30
|
+
const topic = event.data?.topic || "general";
|
|
31
|
+
const content = event.data?.content || "";
|
|
32
|
+
const confidence = event.data?.confidence;
|
|
33
|
+
|
|
34
|
+
if (!content) continue;
|
|
35
|
+
|
|
36
|
+
const metadata: Record<string, unknown> = {};
|
|
37
|
+
if (confidence) metadata.confidence = confidence;
|
|
38
|
+
|
|
39
|
+
ctx.insert({
|
|
40
|
+
source: "teachings",
|
|
41
|
+
title: `[teaching] ${topic}`,
|
|
42
|
+
content,
|
|
43
|
+
topic,
|
|
44
|
+
type: "teaching",
|
|
45
|
+
timestamp: event.timestamp,
|
|
46
|
+
metadata: Object.keys(metadata).length > 0 ? metadata : undefined,
|
|
47
|
+
});
|
|
48
|
+
} catch (e) {
|
|
49
|
+
continue;
|
|
50
|
+
}
|
|
51
|
+
}
|
|
52
|
+
}
|
package/lib/info.ts
CHANGED
|
@@ -62,11 +62,11 @@ export function info(): InfoOutput {
|
|
|
62
62
|
const totalResult = totalStmt.get() as { total: number };
|
|
63
63
|
const total_entries = totalResult?.total ?? 0;
|
|
64
64
|
|
|
65
|
-
// Get last indexed timestamp from
|
|
65
|
+
// Get last indexed timestamp from column
|
|
66
66
|
const tsStmt = db.prepare(`
|
|
67
|
-
SELECT MAX(
|
|
67
|
+
SELECT MAX(timestamp) as ts
|
|
68
68
|
FROM search
|
|
69
|
-
WHERE
|
|
69
|
+
WHERE timestamp IS NOT NULL AND timestamp != ''
|
|
70
70
|
`);
|
|
71
71
|
const tsResult = tsStmt.get() as { ts: string | null };
|
|
72
72
|
const last_indexed = tsResult?.ts ?? new Date().toISOString();
|