@kibhq/core 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53) hide show
  1. package/package.json +40 -0
  2. package/src/compile/backlinks.test.ts +112 -0
  3. package/src/compile/backlinks.ts +80 -0
  4. package/src/compile/cache.test.ts +126 -0
  5. package/src/compile/cache.ts +125 -0
  6. package/src/compile/compiler.test.ts +278 -0
  7. package/src/compile/compiler.ts +305 -0
  8. package/src/compile/diff.test.ts +164 -0
  9. package/src/compile/diff.ts +121 -0
  10. package/src/compile/index-manager.test.ts +227 -0
  11. package/src/compile/index-manager.ts +148 -0
  12. package/src/compile/prompts.ts +124 -0
  13. package/src/constants.ts +40 -0
  14. package/src/errors.ts +66 -0
  15. package/src/hash.test.ts +21 -0
  16. package/src/hash.ts +24 -0
  17. package/src/index.ts +22 -0
  18. package/src/ingest/extractors/file.test.ts +129 -0
  19. package/src/ingest/extractors/file.ts +136 -0
  20. package/src/ingest/extractors/github.test.ts +47 -0
  21. package/src/ingest/extractors/github.ts +135 -0
  22. package/src/ingest/extractors/interface.ts +26 -0
  23. package/src/ingest/extractors/pdf.ts +130 -0
  24. package/src/ingest/extractors/web.test.ts +242 -0
  25. package/src/ingest/extractors/web.ts +163 -0
  26. package/src/ingest/extractors/youtube.test.ts +44 -0
  27. package/src/ingest/extractors/youtube.ts +166 -0
  28. package/src/ingest/ingest.test.ts +187 -0
  29. package/src/ingest/ingest.ts +179 -0
  30. package/src/ingest/normalize.test.ts +120 -0
  31. package/src/ingest/normalize.ts +83 -0
  32. package/src/ingest/router.test.ts +154 -0
  33. package/src/ingest/router.ts +119 -0
  34. package/src/lint/lint.test.ts +253 -0
  35. package/src/lint/lint.ts +43 -0
  36. package/src/lint/rules.ts +178 -0
  37. package/src/providers/anthropic.ts +107 -0
  38. package/src/providers/index.ts +4 -0
  39. package/src/providers/ollama.ts +101 -0
  40. package/src/providers/openai.ts +67 -0
  41. package/src/providers/router.ts +62 -0
  42. package/src/query/query.test.ts +165 -0
  43. package/src/query/query.ts +136 -0
  44. package/src/schemas.ts +193 -0
  45. package/src/search/engine.test.ts +230 -0
  46. package/src/search/engine.ts +390 -0
  47. package/src/skills/loader.ts +163 -0
  48. package/src/skills/runner.ts +139 -0
  49. package/src/skills/schema.ts +28 -0
  50. package/src/skills/skills.test.ts +134 -0
  51. package/src/types.ts +136 -0
  52. package/src/vault.test.ts +141 -0
  53. package/src/vault.ts +251 -0
@@ -0,0 +1,178 @@
1
+ import { readFile } from "node:fs/promises";
2
+ import { relative } from "node:path";
3
+ import { extractWikilinks, parseFrontmatter } from "../compile/diff.js";
4
+ import { WIKI_DIR } from "../constants.js";
5
+ import type { LintDiagnostic, Manifest } from "../types.js";
6
+ import { listWiki } from "../vault.js";
7
+
8
+ type LintRuleFn = (root: string, manifest: Manifest) => Promise<LintDiagnostic[]>;
9
+
10
+ /**
11
+ * Find articles with no backlinks from other articles (orphans).
12
+ */
13
+ export const orphanRule: LintRuleFn = async (root, manifest) => {
14
+ const diagnostics: LintDiagnostic[] = [];
15
+
16
+ for (const [slug, article] of Object.entries(manifest.articles)) {
17
+ if (article.backlinks.length === 0 && article.category !== "output") {
18
+ diagnostics.push({
19
+ rule: "orphan",
20
+ severity: "warning",
21
+ message: `No backlinks from other articles`,
22
+ path: `wiki/${article.category}s/${slug}.md`,
23
+ fixable: false,
24
+ });
25
+ }
26
+ }
27
+
28
+ return diagnostics;
29
+ };
30
+
31
+ /**
32
+ * Find [[wikilinks]] that point to non-existent articles.
33
+ */
34
+ export const brokenLinkRule: LintRuleFn = async (root, manifest) => {
35
+ const diagnostics: LintDiagnostic[] = [];
36
+ const wikiDir = `${root}/${WIKI_DIR}`;
37
+ const files = await listWiki(root);
38
+ const articleFiles = files.filter((f) => !f.endsWith("INDEX.md") && !f.endsWith("GRAPH.md"));
39
+
40
+ // Build set of known slugs
41
+ const knownSlugs = new Set(Object.keys(manifest.articles));
42
+
43
+ for (const filePath of articleFiles) {
44
+ const content = await readFile(filePath, "utf-8");
45
+ const links = extractWikilinks(content);
46
+ const relPath = relative(wikiDir, filePath);
47
+
48
+ for (const link of links) {
49
+ if (!knownSlugs.has(link)) {
50
+ diagnostics.push({
51
+ rule: "broken-link",
52
+ severity: "error",
53
+ message: `Broken wikilink [[${link}]] — no article with this slug exists`,
54
+ path: relPath,
55
+ fixable: false,
56
+ });
57
+ }
58
+ }
59
+ }
60
+
61
+ return diagnostics;
62
+ };
63
+
64
+ /**
65
+ * Find sources whose content hash changed but article hasn't been recompiled.
66
+ */
67
+ export const staleRule: LintRuleFn = async (_root, manifest) => {
68
+ const diagnostics: LintDiagnostic[] = [];
69
+
70
+ for (const [sourceId, source] of Object.entries(manifest.sources)) {
71
+ if (!source.lastCompiled || source.lastCompiled < source.ingestedAt) {
72
+ diagnostics.push({
73
+ rule: "stale",
74
+ severity: "warning",
75
+ message: `Source "${source.metadata.title ?? sourceId}" has not been compiled since last ingest`,
76
+ path: source.producedArticles[0],
77
+ fixable: true,
78
+ });
79
+ }
80
+ }
81
+
82
+ return diagnostics;
83
+ };
84
+
85
+ /**
86
+ * Find articles with missing or malformed YAML frontmatter.
87
+ */
88
+ export const frontmatterRule: LintRuleFn = async (root) => {
89
+ const diagnostics: LintDiagnostic[] = [];
90
+ const wikiDir = `${root}/${WIKI_DIR}`;
91
+ const files = await listWiki(root);
92
+ const articleFiles = files.filter((f) => !f.endsWith("INDEX.md") && !f.endsWith("GRAPH.md"));
93
+
94
+ const requiredFields = ["title", "slug", "category"];
95
+
96
+ for (const filePath of articleFiles) {
97
+ const content = await readFile(filePath, "utf-8");
98
+ const relPath = relative(wikiDir, filePath);
99
+
100
+ // Check if frontmatter exists at all
101
+ if (!content.startsWith("---")) {
102
+ diagnostics.push({
103
+ rule: "frontmatter",
104
+ severity: "error",
105
+ message: "Missing YAML frontmatter",
106
+ path: relPath,
107
+ fixable: false,
108
+ });
109
+ continue;
110
+ }
111
+
112
+ const { frontmatter } = parseFrontmatter(content);
113
+
114
+ for (const field of requiredFields) {
115
+ if (!frontmatter[field]) {
116
+ diagnostics.push({
117
+ rule: "frontmatter",
118
+ severity: "error",
119
+ message: `Missing required frontmatter field: ${field}`,
120
+ path: relPath,
121
+ fixable: false,
122
+ });
123
+ }
124
+ }
125
+ }
126
+
127
+ return diagnostics;
128
+ };
129
+
130
+ /**
131
+ * Find concepts/topics mentioned across multiple articles that don't have their own article.
132
+ */
133
+ export const missingRule: LintRuleFn = async (root, manifest) => {
134
+ const diagnostics: LintDiagnostic[] = [];
135
+ const wikiDir = `${root}/${WIKI_DIR}`;
136
+ const files = await listWiki(root);
137
+ const articleFiles = files.filter((f) => !f.endsWith("INDEX.md") && !f.endsWith("GRAPH.md"));
138
+
139
+ // Collect all wikilinks across all articles
140
+ const linkCounts = new Map<string, number>();
141
+ const knownSlugs = new Set(Object.keys(manifest.articles));
142
+
143
+ for (const filePath of articleFiles) {
144
+ const content = await readFile(filePath, "utf-8");
145
+ const links = extractWikilinks(content);
146
+
147
+ for (const link of links) {
148
+ if (!knownSlugs.has(link)) {
149
+ linkCounts.set(link, (linkCounts.get(link) ?? 0) + 1);
150
+ }
151
+ }
152
+ }
153
+
154
+ // Report topics mentioned in 3+ articles without a dedicated article
155
+ for (const [slug, count] of linkCounts) {
156
+ if (count >= 3) {
157
+ diagnostics.push({
158
+ rule: "missing",
159
+ severity: "info",
160
+ message: `"${slug}" is referenced in ${count} articles but has no dedicated article`,
161
+ fixable: true,
162
+ });
163
+ }
164
+ }
165
+
166
+ return diagnostics;
167
+ };
168
+
169
/**
 * All lint rules.
 *
 * Registry iterated by the linter; each entry's `name` mirrors the `rule`
 * field the corresponding rule writes into its diagnostics.
 */
export const ALL_RULES: { name: string; fn: LintRuleFn }[] = [
  { name: "orphan", fn: orphanRule },
  { name: "broken-link", fn: brokenLinkRule },
  { name: "stale", fn: staleRule },
  { name: "frontmatter", fn: frontmatterRule },
  { name: "missing", fn: missingRule },
];
@@ -0,0 +1,107 @@
1
+ import type { CompletionParams, CompletionResult, LLMProvider, StreamChunk } from "../types.js";
2
+
3
+ // Lazy-loaded SDK
4
+ let AnthropicClass: any = null;
5
+
6
+ async function getClient() {
7
+ if (!AnthropicClass) {
8
+ const mod = await import("@anthropic-ai/sdk");
9
+ AnthropicClass = mod.default;
10
+ }
11
+ return new AnthropicClass();
12
+ }
13
+
14
/**
 * LLM provider backed by the Anthropic Messages API.
 *
 * @param model Anthropic model id passed through to every request.
 * @returns an LLMProvider with complete, stream and vision support.
 */
export function createAnthropicProvider(model: string): LLMProvider {
  return {
    name: "anthropic",

    /** One-shot completion; concatenates all text blocks of the response. */
    async complete(params: CompletionParams): Promise<CompletionResult> {
      const client = await getClient();
      const response = await client.messages.create({
        model,
        max_tokens: params.maxTokens ?? 4096,
        temperature: params.temperature ?? 0,
        system: params.system,
        messages: params.messages.map((m) => ({
          role: m.role,
          content: m.content,
        })),
      });

      // Keep only text blocks; other block types are ignored.
      const content = response.content
        .filter((b: any) => b.type === "text")
        .map((b: any) => b.text)
        .join("");

      return {
        content,
        usage: {
          inputTokens: response.usage.input_tokens,
          outputTokens: response.usage.output_tokens,
        },
        // NOTE(review): every non-"end_turn" stop reason (e.g. stop_sequence)
        // is collapsed to "max_tokens" — confirm this coarse mapping is
        // intentional.
        stopReason: response.stop_reason === "end_turn" ? "end_turn" : "max_tokens",
      };
    },

    /** Streaming completion: yields text deltas, then usage updates. */
    async *stream(params: CompletionParams): AsyncIterable<StreamChunk> {
      const client = await getClient();
      const stream = await client.messages.stream({
        model,
        max_tokens: params.maxTokens ?? 4096,
        temperature: params.temperature ?? 0,
        system: params.system,
        messages: params.messages.map((m) => ({
          role: m.role,
          content: m.content,
        })),
      });

      for await (const event of stream) {
        // Incremental text from a content block.
        if (event.type === "content_block_delta" && event.delta.type === "text_delta") {
          yield { type: "text", text: event.delta.text };
        }
        // Token-usage update attached to a message delta.
        if (event.type === "message_delta" && event.usage) {
          yield {
            type: "usage",
            usage: {
              inputTokens: event.usage.input_tokens ?? 0,
              outputTokens: event.usage.output_tokens ?? 0,
            },
          };
        }
      }
    },

    /** Describe an image using the same model; returns concatenated text. */
    async vision(params: { image: Buffer; prompt: string }): Promise<string> {
      const client = await getClient();
      const response = await client.messages.create({
        model,
        max_tokens: 4096,
        messages: [
          {
            role: "user",
            content: [
              {
                type: "image",
                source: {
                  type: "base64",
                  // NOTE(review): media_type is hard-coded to PNG — assumes
                  // all callers pass PNG buffers; confirm at call sites.
                  media_type: "image/png",
                  data: params.image.toString("base64"),
                },
              },
              {
                type: "text",
                text: params.prompt,
              },
            ],
          },
        ],
      });

      return response.content
        .filter((b: any) => b.type === "text")
        .map((b: any) => b.text)
        .join("");
    },
  };
}
@@ -0,0 +1,4 @@
1
+ export { createAnthropicProvider } from "./anthropic.js";
2
+ export { createOllamaProvider } from "./ollama.js";
3
+ export { createOpenAIProvider } from "./openai.js";
4
+ export { createProvider, detectProvider } from "./router.js";
@@ -0,0 +1,101 @@
1
+ import type { CompletionParams, CompletionResult, LLMProvider, StreamChunk } from "../types.js";
2
+
3
+ const OLLAMA_BASE = "http://localhost:11434";
4
+
5
+ export function createOllamaProvider(model: string): LLMProvider {
6
+ return {
7
+ name: "ollama",
8
+
9
+ async complete(params: CompletionParams): Promise<CompletionResult> {
10
+ const messages = [
11
+ { role: "system", content: params.system },
12
+ ...params.messages.map((m) => ({ role: m.role, content: m.content })),
13
+ ];
14
+
15
+ const response = await fetch(`${OLLAMA_BASE}/api/chat`, {
16
+ method: "POST",
17
+ headers: { "Content-Type": "application/json" },
18
+ body: JSON.stringify({
19
+ model,
20
+ messages,
21
+ stream: false,
22
+ options: {
23
+ temperature: params.temperature ?? 0,
24
+ num_predict: params.maxTokens ?? 4096,
25
+ },
26
+ }),
27
+ });
28
+
29
+ if (!response.ok) {
30
+ throw new Error(`Ollama request failed: ${response.status} ${response.statusText}`);
31
+ }
32
+
33
+ const data = (await response.json()) as any;
34
+ return {
35
+ content: data.message?.content ?? "",
36
+ usage: {
37
+ inputTokens: data.prompt_eval_count ?? 0,
38
+ outputTokens: data.eval_count ?? 0,
39
+ },
40
+ stopReason: "end_turn",
41
+ };
42
+ },
43
+
44
+ async *stream(params: CompletionParams): AsyncIterable<StreamChunk> {
45
+ const messages = [
46
+ { role: "system", content: params.system },
47
+ ...params.messages.map((m) => ({ role: m.role, content: m.content })),
48
+ ];
49
+
50
+ const response = await fetch(`${OLLAMA_BASE}/api/chat`, {
51
+ method: "POST",
52
+ headers: { "Content-Type": "application/json" },
53
+ body: JSON.stringify({
54
+ model,
55
+ messages,
56
+ stream: true,
57
+ options: {
58
+ temperature: params.temperature ?? 0,
59
+ num_predict: params.maxTokens ?? 4096,
60
+ },
61
+ }),
62
+ });
63
+
64
+ if (!response.ok) {
65
+ throw new Error(`Ollama request failed: ${response.status} ${response.statusText}`);
66
+ }
67
+
68
+ const reader = response.body?.getReader();
69
+ if (!reader) return;
70
+
71
+ const decoder = new TextDecoder();
72
+ let buffer = "";
73
+
74
+ while (true) {
75
+ const { done, value } = await reader.read();
76
+ if (done) break;
77
+
78
+ buffer += decoder.decode(value, { stream: true });
79
+ const lines = buffer.split("\n");
80
+ buffer = lines.pop() ?? "";
81
+
82
+ for (const line of lines) {
83
+ if (!line.trim()) continue;
84
+ const data = JSON.parse(line) as any;
85
+ if (data.message?.content) {
86
+ yield { type: "text", text: data.message.content };
87
+ }
88
+ if (data.done && data.eval_count) {
89
+ yield {
90
+ type: "usage",
91
+ usage: {
92
+ inputTokens: data.prompt_eval_count ?? 0,
93
+ outputTokens: data.eval_count ?? 0,
94
+ },
95
+ };
96
+ }
97
+ }
98
+ }
99
+ },
100
+ };
101
+ }
@@ -0,0 +1,67 @@
1
+ import type { CompletionParams, CompletionResult, LLMProvider, StreamChunk } from "../types.js";
2
+
3
+ // Lazy-loaded SDK
4
+ let OpenAIClass: any = null;
5
+
6
+ async function getClient() {
7
+ if (!OpenAIClass) {
8
+ const mod = await import("openai");
9
+ OpenAIClass = mod.default;
10
+ }
11
+ return new OpenAIClass();
12
+ }
13
+
14
+ export function createOpenAIProvider(model: string): LLMProvider {
15
+ return {
16
+ name: "openai",
17
+
18
+ async complete(params: CompletionParams): Promise<CompletionResult> {
19
+ const client = await getClient();
20
+ const response = await client.chat.completions.create({
21
+ model,
22
+ max_tokens: params.maxTokens ?? 4096,
23
+ temperature: params.temperature ?? 0,
24
+ messages: [
25
+ { role: "system", content: params.system },
26
+ ...params.messages.map((m) => ({
27
+ role: m.role as "user" | "assistant",
28
+ content: m.content,
29
+ })),
30
+ ],
31
+ });
32
+
33
+ return {
34
+ content: response.choices[0]?.message?.content ?? "",
35
+ usage: {
36
+ inputTokens: response.usage?.prompt_tokens ?? 0,
37
+ outputTokens: response.usage?.completion_tokens ?? 0,
38
+ },
39
+ stopReason: response.choices[0]?.finish_reason === "stop" ? "end_turn" : "max_tokens",
40
+ };
41
+ },
42
+
43
+ async *stream(params: CompletionParams): AsyncIterable<StreamChunk> {
44
+ const client = await getClient();
45
+ const stream = await client.chat.completions.create({
46
+ model,
47
+ max_tokens: params.maxTokens ?? 4096,
48
+ temperature: params.temperature ?? 0,
49
+ stream: true,
50
+ messages: [
51
+ { role: "system", content: params.system },
52
+ ...params.messages.map((m) => ({
53
+ role: m.role as "user" | "assistant",
54
+ content: m.content,
55
+ })),
56
+ ],
57
+ });
58
+
59
+ for await (const chunk of stream) {
60
+ const delta = chunk.choices[0]?.delta?.content;
61
+ if (delta) {
62
+ yield { type: "text", text: delta };
63
+ }
64
+ }
65
+ },
66
+ };
67
+ }
@@ -0,0 +1,62 @@
1
+ import { DEFAULTS } from "../constants.js";
2
+ import { NoProviderError } from "../errors.js";
3
+ import type { LLMProvider } from "../types.js";
4
+
5
+ interface DetectedProvider {
6
+ name: string;
7
+ model: string;
8
+ }
9
+
10
+ /**
11
+ * Auto-detect LLM provider from environment variables.
12
+ */
13
+ export function detectProvider(): DetectedProvider {
14
+ if (process.env.ANTHROPIC_API_KEY) {
15
+ return { name: "anthropic", model: DEFAULTS.model };
16
+ }
17
+ if (process.env.OPENAI_API_KEY) {
18
+ return { name: "openai", model: "gpt-4o" };
19
+ }
20
+ // Ollama detection is async — handled in createProvider
21
+ return { name: "ollama", model: "llama3" };
22
+ }
23
+
24
+ /**
25
+ * Create an LLM provider instance.
26
+ * Lazy-loads the SDK for the selected provider.
27
+ */
28
+ export async function createProvider(providerName?: string, model?: string): Promise<LLMProvider> {
29
+ const detected = detectProvider();
30
+ const name = providerName ?? detected.name;
31
+ const selectedModel = model ?? detected.model;
32
+
33
+ switch (name) {
34
+ case "anthropic": {
35
+ if (!process.env.ANTHROPIC_API_KEY) {
36
+ throw new NoProviderError();
37
+ }
38
+ const { createAnthropicProvider } = await import("./anthropic.js");
39
+ return createAnthropicProvider(selectedModel);
40
+ }
41
+ case "openai": {
42
+ if (!process.env.OPENAI_API_KEY) {
43
+ throw new NoProviderError();
44
+ }
45
+ const { createOpenAIProvider } = await import("./openai.js");
46
+ return createOpenAIProvider(selectedModel);
47
+ }
48
+ case "ollama": {
49
+ // Check if Ollama is running
50
+ try {
51
+ const res = await fetch("http://localhost:11434/api/tags");
52
+ if (!res.ok) throw new Error("Not running");
53
+ } catch {
54
+ throw new NoProviderError();
55
+ }
56
+ const { createOllamaProvider } = await import("./ollama.js");
57
+ return createOllamaProvider(selectedModel);
58
+ }
59
+ default:
60
+ throw new NoProviderError();
61
+ }
62
+ }
@@ -0,0 +1,165 @@
1
+ import { afterEach, describe, expect, test } from "bun:test";
2
+ import { mkdtemp, rm } from "node:fs/promises";
3
+ import { tmpdir } from "node:os";
4
+ import { join } from "node:path";
5
+ import { SearchIndex } from "../search/engine.js";
6
+ import type { CompletionParams, CompletionResult, LLMProvider, StreamChunk } from "../types.js";
7
+ import { initVault, writeWiki } from "../vault.js";
8
+ import { queryVault } from "./query.js";
9
+
10
// Path of the vault created for the current test; removed after each test.
let tempDir: string;

afterEach(async () => {
  if (tempDir) await rm(tempDir, { recursive: true, force: true });
});

// Create and initialize a fresh vault in a unique temp directory, recording
// its path for cleanup.
async function makeTempVault() {
  tempDir = await mkdtemp(join(tmpdir(), "kib-query-test-"));
  await initVault(tempDir, { name: "test" });
  return tempDir;
}
21
+
22
+ function mockProvider(response: string): LLMProvider {
23
+ return {
24
+ name: "mock",
25
+ async complete(): Promise<CompletionResult> {
26
+ return {
27
+ content: response,
28
+ usage: { inputTokens: 500, outputTokens: 100 },
29
+ stopReason: "end_turn",
30
+ };
31
+ },
32
+ async *stream(): AsyncIterable<StreamChunk> {
33
+ for (const char of response) {
34
+ yield { type: "text", text: char };
35
+ }
36
+ yield { type: "usage", usage: { inputTokens: 500, outputTokens: 100 } };
37
+ },
38
+ };
39
+ }
40
+
41
// Render a minimal wiki article: YAML frontmatter (title/slug/category plus
// empty tags and summary) followed by an H1 and the body content. The
// template lines are intentionally flush-left so the YAML is valid.
function articleMd(title: string, slug: string, content: string): string {
  return `---
title: ${title}
slug: ${slug}
category: concept
tags: []
summary: ""
---

# ${title}

${content}`;
}
54
+
55
// End-to-end tests for queryVault against a real temp vault and a mocked
// LLM provider.
describe("queryVault", () => {
  test("queries relevant articles and returns answer", async () => {
    const root = await makeTempVault();

    await writeWiki(
      root,
      "concepts/transformers.md",
      articleMd(
        "Transformer Architecture",
        "transformer-architecture",
        "The transformer is a neural network architecture based on self-attention.",
      ),
    );
    await writeWiki(
      root,
      "concepts/attention.md",
      articleMd(
        "Attention Mechanisms",
        "attention-mechanisms",
        "Attention computes weighted sums over value vectors.",
      ),
    );

    // Build search index so query can find articles
    const index = new SearchIndex();
    await index.build(root, "wiki");
    await index.save(root);

    const provider = mockProvider(
      "The transformer architecture uses self-attention [Transformer Architecture].",
    );

    const result = await queryVault(root, "How do transformers work?", provider);

    expect(result.answer).toContain("transformer");
    expect(result.sourcePaths.length).toBeGreaterThan(0);
    expect(result.usage.inputTokens).toBeGreaterThan(0);
  });

  // No articles and no index: queryVault should still produce an answer.
  test("returns answer even with empty wiki", async () => {
    const root = await makeTempVault();

    const provider = mockProvider("No relevant articles found in the knowledge base.");

    const result = await queryVault(root, "What is a transformer?", provider);
    expect(result.answer).toBeTruthy();
  });

  // Passing onChunk should route the provider's stream through the callback
  // and still return the assembled answer.
  test("supports streaming mode", async () => {
    const root = await makeTempVault();
    await writeWiki(
      root,
      "concepts/test.md",
      articleMd("Test", "test", "Test content for streaming."),
    );

    const index = new SearchIndex();
    await index.build(root, "wiki");
    await index.save(root);

    const provider = mockProvider("Streamed answer.");

    const chunks: string[] = [];
    const result = await queryVault(root, "test", provider, {
      onChunk: (text) => chunks.push(text),
    });

    expect(chunks.length).toBeGreaterThan(0);
    expect(chunks.join("")).toBe("Streamed answer.");
    expect(result.answer).toBe("Streamed answer.");
  });

  test("supports conversation history", async () => {
    const root = await makeTempVault();
    await writeWiki(root, "concepts/test.md", articleMd("Test", "test", "Test content."));

    const index = new SearchIndex();
    await index.build(root, "wiki");
    await index.save(root);

    // Track what gets sent to the provider
    let receivedMessages: any[] = [];
    const provider: LLMProvider = {
      name: "mock",
      async complete(params: CompletionParams): Promise<CompletionResult> {
        receivedMessages = params.messages;
        return {
          content: "Answer with history.",
          usage: { inputTokens: 100, outputTokens: 50 },
          stopReason: "end_turn",
        };
      },
      async *stream(): AsyncIterable<StreamChunk> {
        yield { type: "text", text: "stream" };
      },
    };

    await queryVault(root, "follow up question", provider, {
      history: [
        { role: "user", content: "previous question" },
        { role: "assistant", content: "previous answer" },
      ],
    });

    // Should include history + new question
    expect(receivedMessages.length).toBe(3);
    expect(receivedMessages[0]!.content).toBe("previous question");
    expect(receivedMessages[1]!.content).toBe("previous answer");
    expect(receivedMessages[2]!.content).toContain("follow up question");
  });
});
+ });