akm-cli 0.7.1 → 0.7.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +35 -0
- package/dist/cli.js +62 -16
- package/dist/commands/history.js +2 -7
- package/dist/commands/info.js +2 -2
- package/dist/commands/installed-stashes.js +45 -1
- package/dist/commands/search.js +2 -2
- package/dist/commands/show.js +4 -19
- package/dist/commands/source-add.js +1 -1
- package/dist/core/common.js +16 -1
- package/dist/core/config.js +18 -3
- package/dist/indexer/db-search.js +33 -39
- package/dist/indexer/db.js +51 -1
- package/dist/indexer/graph-extraction.js +5 -3
- package/dist/indexer/indexer.js +334 -121
- package/dist/indexer/manifest.js +18 -23
- package/dist/indexer/memory-inference.js +47 -58
- package/dist/indexer/metadata.js +253 -21
- package/dist/indexer/search-source.js +11 -5
- package/dist/llm/client.js +61 -1
- package/dist/llm/embedder.js +8 -5
- package/dist/llm/embedders/local.js +8 -2
- package/dist/llm/embedders/remote.js +4 -2
- package/dist/llm/graph-extract.js +4 -4
- package/dist/llm/memory-infer.js +61 -33
- package/dist/llm/metadata-enhance.js +2 -2
- package/dist/output/cli-hints.js +5 -2
- package/dist/output/renderers.js +22 -49
- package/dist/registry/build-index.js +13 -18
- package/dist/setup/setup.js +238 -96
- package/dist/sources/providers/git.js +14 -2
- package/dist/sources/providers/website.js +4 -460
- package/dist/sources/website-ingest.js +470 -0
- package/dist/wiki/wiki.js +11 -1
- package/dist/workflows/parser.js +19 -4
- package/dist/workflows/runs.js +3 -3
- package/docs/README.md +10 -3
- package/docs/migration/release-notes/0.7.0.md +22 -0
- package/package.json +5 -2
package/dist/llm/client.js
CHANGED
@@ -51,8 +51,9 @@ export async function chatCompletion(config, messages, options) {
             messages,
             temperature: options?.temperature ?? config.temperature ?? 0.3,
             max_tokens: options?.maxTokens ?? config.maxTokens ?? 512,
+            ...config.extraParams,
         }),
-    });
+    }, 30_000, options?.signal);
     if (!response.ok) {
         const rawBody = await response.text().catch(() => "");
         const safeBody = redactErrorBody(rawBody);
@@ -65,6 +66,7 @@ export async function chatCompletion(config, messages, options) {
 export function stripJsonFences(raw) {
     return raw
         .trim()
+        .replace(/<think>[\s\S]*?<\/think>/gi, "")
         .replace(/^```(?:json)?\s*\n?/i, "")
         .replace(/\n?```\s*$/i, "")
         .trim();
@@ -78,6 +80,64 @@ export function parseJsonResponse(raw) {
         return undefined;
     }
 }
+/**
+ * Best-effort recovery for providers that wrap JSON in extra prose or fenced
+ * blocks. Extracts the first balanced top-level object/array and parses it.
+ */
+export function parseEmbeddedJsonResponse(raw) {
+    const direct = parseJsonResponse(raw);
+    if (direct !== undefined)
+        return direct;
+    const text = stripJsonFences(raw);
+    let arrayFallback;
+    for (let start = 0; start < text.length; start++) {
+        const opener = text[start];
+        if (opener !== "{" && opener !== "[")
+            continue;
+        const closer = opener === "{" ? "}" : "]";
+        let depth = 0;
+        let inString = false;
+        let escaped = false;
+        for (let i = start; i < text.length; i++) {
+            const ch = text[i];
+            if (inString) {
+                if (escaped) {
+                    escaped = false;
+                }
+                else if (ch === "\\") {
+                    escaped = true;
+                }
+                else if (ch === '"') {
+                    inString = false;
+                }
+                continue;
+            }
+            if (ch === '"') {
+                inString = true;
+                continue;
+            }
+            if (ch === opener)
+                depth += 1;
+            if (ch === closer) {
+                depth -= 1;
+                if (depth === 0) {
+                    try {
+                        const parsed = JSON.parse(text.slice(start, i + 1));
+                        if (!Array.isArray(parsed)) {
+                            return parsed;
+                        }
+                        arrayFallback ??= parsed;
+                        break;
+                    }
+                    catch {
+                        break;
+                    }
+                }
+            }
+        }
+    }
+    return arrayFallback;
+}
 // ── Availability check ──────────────────────────────────────────────────────
 /**
  * Check if the LLM endpoint is reachable.
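The new `parseEmbeddedJsonResponse` export is what the enrichment passes further down rely on. A minimal sketch of the behavior implied by the added code (the inputs are illustrative, not from the package's tests, and it assumes the strict `parseJsonResponse` path returns `undefined` for prose-wrapped input):

```js
import { parseEmbeddedJsonResponse } from "./client.js"; // illustrative relative import

// Strict JSON still resolves through the existing parseJsonResponse path.
parseEmbeddedJsonResponse('{"entities": [], "relations": []}');
// → { entities: [], relations: [] }

// Prose-wrapped output is recovered by scanning for the first balanced object.
parseEmbeddedJsonResponse('Sure! Here is the result: {"entities": [{"name": "akm"}], "relations": []} Hope that helps.');
// → { entities: [{ name: "akm" }], relations: [] }

// A bare top-level array is only returned as a fallback when no object parses.
parseEmbeddedJsonResponse("No data found, returning []");
// → []
```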
package/dist/llm/embedder.js
CHANGED
@@ -47,14 +47,14 @@ export function resetLocalEmbedder() {
  * Results are cached in an LRU cache (max ~100 entries) keyed by query text
  * and embedding config. Repeated identical queries return the cached vector.
  */
-export async function embed(text, embeddingConfig) {
+export async function embed(text, embeddingConfig, signal) {
     const key = embedCacheKey(text, embeddingConfig);
     const cached = getCachedEmbedding(key);
     if (cached)
         return cached;
     const result = embeddingConfig && hasRemoteEndpoint(embeddingConfig)
-        ? await new RemoteEmbedder(embeddingConfig).embed(text)
-        : await localEmbedder.
+        ? await new RemoteEmbedder(embeddingConfig).embed(text, signal)
+        : await localEmbedder.embed(text, signal);
     setCachedEmbedding(key, result);
     return result;
 }
@@ -63,16 +63,19 @@ export async function embed(text, embeddingConfig) {
  * Uses the OpenAI-compatible batch API for remote endpoints (batches of 100).
  * Falls back to sequential embedding for the local transformer pipeline.
  */
-export async function embedBatch(texts, embeddingConfig) {
+export async function embedBatch(texts, embeddingConfig, signal) {
     if (texts.length === 0)
         return [];
     if (embeddingConfig && hasRemoteEndpoint(embeddingConfig)) {
-        return new RemoteEmbedder(embeddingConfig).embedBatch(texts);
+        return new RemoteEmbedder(embeddingConfig).embedBatch(texts, signal);
     }
     // Local transformer: process sequentially (pipeline handles one at a time)
     const localModel = embeddingConfig?.localModel;
     const results = [];
     for (const text of texts) {
+        if (signal?.aborted) {
+            throw signal.reason instanceof Error ? signal.reason : new Error("embedding interrupted");
+        }
         results.push(await localEmbedder.embedWithModel(text, localModel));
     }
     return results;
package/dist/llm/embedders/local.js
CHANGED
@@ -42,14 +42,20 @@ export class LocalEmbedder {
         this.pipelinePromise = undefined;
         this.pipelineModelName = undefined;
     }
-    async embed(text) {
+    async embed(text, signal) {
+        if (signal?.aborted) {
+            throw signal.reason instanceof Error ? signal.reason : new Error("embedding interrupted");
+        }
         return this.embedWithModel(text, this.defaultModel);
     }
-    async embedBatch(texts) {
+    async embedBatch(texts, signal) {
        if (texts.length === 0)
            return [];
        const results = [];
        for (const text of texts) {
+            if (signal?.aborted) {
+                throw signal.reason instanceof Error ? signal.reason : new Error("embedding interrupted");
+            }
            results.push(await this.embedWithModel(text, this.defaultModel));
        }
        return results;
package/dist/llm/embedders/remote.js
CHANGED
@@ -15,7 +15,7 @@ export class RemoteEmbedder {
     constructor(config) {
         this.config = config;
     }
-    async embed(text) {
+    async embed(text, signal) {
         const headers = this.buildHeaders();
         const body = {
             input: text,
@@ -32,6 +32,7 @@ export class RemoteEmbedder {
             method: "POST",
             headers,
             body: JSON.stringify(body),
+            signal,
         });
         if (!response.ok) {
             const errBody = await response.text().catch(() => "");
@@ -43,7 +44,7 @@ export class RemoteEmbedder {
         }
         return l2Normalize(json.data[0].embedding);
     }
-    async embedBatch(texts) {
+    async embedBatch(texts, signal) {
         if (texts.length === 0)
             return [];
         const results = [];
@@ -66,6 +67,7 @@ export class RemoteEmbedder {
             method: "POST",
             headers,
             body: JSON.stringify(body),
+            signal,
         });
         if (!response.ok) {
             const respBody = await response.text().catch(() => "");
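The embedder entry points now accept an optional `AbortSignal` that is forwarded to the remote fetch and checked between items in the local loop. A sketch of how a caller might cancel an embedding run, assuming only the signatures shown above (the import path and the 10-second timeout are illustrative):

```js
import { embed } from "./embedder.js"; // illustrative relative import

const controller = new AbortController();
// Abort the whole embedding step after 10 seconds; the reason surfaces as the thrown error.
const timer = setTimeout(() => controller.abort(new Error("embedding interrupted")), 10_000);
try {
    // With no remote embedding config, this falls through to the local embedder,
    // which checks signal?.aborted before doing any work.
    const vector = await embed("how do I rotate the API key?", undefined, controller.signal);
    console.log(vector.length);
}
finally {
    clearTimeout(timer);
}
```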
package/dist/llm/graph-extract.js
CHANGED
@@ -19,7 +19,7 @@
  */
 import { toErrorMessage } from "../core/common";
 import { warn } from "../core/warn";
-import { chatCompletion,
+import { chatCompletion, parseEmbeddedJsonResponse } from "./client";
 /** Hard cap on body chars sent to the model. */
 const MAX_BODY_CHARS = 4000;
 /** Hard cap on entities returned per asset — guards against runaway LLM output. */
@@ -49,7 +49,7 @@ Asset body:
  * JSON, empty response). Errors are logged via `warn()` but never thrown — a
  * failed extraction for one asset must not abort the rest of the index pass.
  */
-export async function extractGraphFromBody(llmConfig, body) {
+export async function extractGraphFromBody(llmConfig, body, signal) {
     const empty = { entities: [], relations: [] };
     const trimmedBody = body.trim();
     if (!trimmedBody)
@@ -61,14 +61,14 @@ export async function extractGraphFromBody(llmConfig, body) {
         chatCompletion(llmConfig, [
             { role: "system", content: SYSTEM_PROMPT },
             { role: "user", content: userPrompt },
-        ], { maxTokens: 1024, temperature: 0.1 }),
+        ], { maxTokens: 1024, temperature: 0.1, signal }),
         new Promise((_, reject) => {
             timeoutHandle = setTimeout(() => reject(new Error("graph extraction timed out")), LLM_TIMEOUT_MS);
         }),
     ]);
     if (!raw)
         return empty;
-    const parsed =
+    const parsed = parseEmbeddedJsonResponse(raw);
     if (!parsed) {
         warn("graph extraction: invalid JSON response from LLM; skipping asset.");
         return empty;
package/dist/llm/memory-infer.js
CHANGED
@@ -1,11 +1,11 @@
 /**
  * LLM helper for the `akm index` memory-inference pass (#201).
  *
- *
- * (in `src/indexer/memory-inference.ts`) is responsible for
- * memories are pending, persisting the
- * correct frontmatter (`inferred: true`, `source: <parent-ref>`), and
- *
+ * Compresses a single memory body into one higher-signal derived memory. The
+ * pass itself (in `src/indexer/memory-inference.ts`) is responsible for
+ * deciding which memories are pending, persisting the derived memory with the
+ * correct frontmatter (`inferred: true`, `source: <parent-ref>`), and marking
+ * the parent as processed for idempotency.
  *
  * This module is intentionally tiny and stateless so tests can stub it via
  * `mock.module("../src/llm/memory-infer", ...)` without hitting a network.
@@ -17,38 +17,50 @@
  */
 import { toErrorMessage } from "../core/common";
 import { warn } from "../core/warn";
-import { chatCompletion,
+import { chatCompletion, parseEmbeddedJsonResponse } from "./client";
 /** Hard cap on body chars sent to the model — pragmatic and matches `runLlmEnrich`. */
 const MAX_BODY_CHARS = 4000;
-/** Hard cap on the number of atomic facts returned per memory. */
-const MAX_FACTS_PER_MEMORY = 16;
 /** Hard timeout for the LLM call. The index run must not hang on a misbehaving endpoint. */
 const LLM_TIMEOUT_MS = 30_000;
-const SYSTEM_PROMPT = "You
-"Return only valid JSON. No prose
-const USER_PROMPT_PREFIX = `
+const SYSTEM_PROMPT = "You compress a developer memory into one high-signal derived memory for later retrieval. " +
+    "Return only valid JSON. No prose outside the JSON object. No markdown fences.";
+const USER_PROMPT_PREFIX = `Compress the memory below into one concise, information-dense derived memory.

 Rules:
-- Output ONLY a JSON object: {"
--
-
-
--
-
+- Output ONLY a JSON object with exactly these keys: {"title": string, "description": string, "tags": string[], "searchHints": string[], "content": string}.
+- ` +
+    '"title"' +
+    ` is a short, descriptive title for the derived memory.
+- ` +
+    '"description"' +
+    ` is one sentence explaining why this derived memory matters.
+- ` +
+    '"tags"' +
+    ` contains 3-8 specific keywords.
+- ` +
+    '"searchHints"' +
+    ` contains 3-6 natural-language retrieval phrases.
+- ` +
+    '"content"' +
+    ` must be compact markdown that preserves the reusable insight, root cause, fix, constraints, and applicability conditions when present.
+- Prefer 2-4 short sections with informative headings over long prose.
+- Omit timestamps, verification-only metrics, pleasantries, and session-specific chatter unless they are essential to applying the insight later.
+- Preserve technical specifics (names, versions, identifiers, selectors, file paths, config keys) verbatim.

 Memory:
 `;
 /**
- *
+ * Compress a single memory body into one derived memory via the configured LLM.
  *
- * Returns `
+ * Returns `undefined` on any failure (timeout, invalid JSON, empty response).
+ * Errors
  * are logged via `warn()` but never thrown — a failed split for one memory
  * must not abort the rest of the index pass.
  */
-export async function
+export async function compressMemoryToDerivedMemory(llmConfig, body, signal) {
     const trimmedBody = body.trim();
     if (!trimmedBody)
-        return
+        return undefined;
     const userPrompt = `${USER_PROMPT_PREFIX}${trimmedBody.slice(0, MAX_BODY_CHARS)}`;
     let timeoutHandle;
     try {
@@ -56,28 +68,44 @@ export async function splitMemoryIntoAtomicFacts(llmConfig, body) {
         chatCompletion(llmConfig, [
             { role: "system", content: SYSTEM_PROMPT },
             { role: "user", content: userPrompt },
-        ], { maxTokens: 768, temperature: 0.1 }),
+        ], { maxTokens: 768, temperature: 0.1, signal }),
         new Promise((_, reject) => {
             timeoutHandle = setTimeout(() => reject(new Error("memory inference timed out")), LLM_TIMEOUT_MS);
         }),
     ]);
     if (!raw)
-        return
-    const parsed =
-    if (!parsed
+        return undefined;
+    const parsed = parseEmbeddedJsonResponse(raw);
+    if (!parsed) {
         warn("memory inference: invalid JSON response from LLM; skipping memory.");
-        return
+        return undefined;
     }
-    const
-
-
-
-    .
-
+    const title = typeof parsed.title === "string" ? parsed.title.trim() : "";
+    const description = typeof parsed.description === "string" ? parsed.description.trim() : "";
+    const content = typeof parsed.content === "string" ? parsed.content.trim() : "";
+    const tags = Array.isArray(parsed.tags)
+        ? parsed.tags
+            .filter((t) => typeof t === "string")
+            .map((t) => t.trim())
+            .filter(Boolean)
+            .slice(0, 8)
+        : [];
+    const searchHints = Array.isArray(parsed.searchHints)
+        ? parsed.searchHints
+            .filter((h) => typeof h === "string")
+            .map((h) => h.trim())
+            .filter(Boolean)
+            .slice(0, 6)
+        : [];
+    if (!title || !description || !content || tags.length === 0 || searchHints.length === 0) {
+        warn("memory inference: incomplete derived memory payload from LLM; skipping memory.");
+        return undefined;
+    }
+    return { title, description, tags, searchHints, content };
     }
     catch (err) {
         warn(`memory inference failed: ${toErrorMessage(err)}`);
-        return
+        return undefined;
     }
     finally {
         if (timeoutHandle !== undefined)
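The old atomic-facts splitter is replaced by `compressMemoryToDerivedMemory`, which returns a single validated object or `undefined`. A hedged sketch of calling it, with an assumed `llmConfig` shape (the real config comes from the package's own config module):

```js
import { compressMemoryToDerivedMemory } from "./memory-infer.js"; // illustrative relative import

// llmConfig fields are assumed here for illustration; see dist/core/config.js for the real shape.
const llmConfig = { endpoint: "http://localhost:11434/v1", model: "qwen2.5:7b" };
const body = "Fixed flaky deploys: flush the VPN DNS cache before running terraform apply.";

const derived = await compressMemoryToDerivedMemory(llmConfig, body, undefined);
// `undefined` means the call timed out, returned invalid JSON, or produced an
// incomplete payload; the indexer is expected to skip the memory rather than throw.
if (derived) {
    // All five fields are present and non-empty when a result comes back:
    // { title, description, tags: [...], searchHints: [...], content }
    console.log(derived.title, derived.tags);
}
```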
package/dist/llm/metadata-enhance.js
CHANGED
@@ -11,7 +11,7 @@ const SYSTEM_PROMPT = `You are a metadata generator for a developer asset regist
  * Use an LLM to enhance a stash entry's metadata: improve description,
  * generate searchHints, and suggest tags.
  */
-export async function enhanceMetadata(config, entry, fileContent) {
+export async function enhanceMetadata(config, entry, fileContent, signal) {
     const contextParts = [`Name: ${entry.name}`, `Type: ${entry.type}`];
     if (entry.description)
         contextParts.push(`Current description: ${entry.description}`);
@@ -33,7 +33,7 @@ Return ONLY the JSON object, no explanation.`
     const raw = await chatCompletion(config, [
         { role: "system", content: SYSTEM_PROMPT },
         { role: "user", content: userPrompt },
-    ]);
+    ], { signal });
     const parsed = parseJsonResponse(raw);
     if (!parsed)
         return {};
package/dist/output/cli-hints.js
CHANGED
@@ -133,6 +133,7 @@ akm remember "Deployment needs VPN access" # Record a memory in your stash
 akm remember --name release-retro < notes.md # Save multiline memory from stdin
 akm import ./docs/auth-flow.md # Import a file as knowledge
 akm import - --name scratch-notes < notes.md # Import stdin as a knowledge doc
+akm import https://example.com/docs/auth # Fetch one URL and import it as knowledge
 akm workflow create ship-release # Create a workflow asset in the stash
 akm workflow validate workflows/foo.md # Validate a workflow file or ref; lists every error
 akm workflow next workflow:ship-release # Start or resume the next workflow step
@@ -160,6 +161,7 @@ akm wiki show research # Path, description, counts, last
 akm wiki pages research # Page refs + descriptions (excludes schema/index/log; includes raw/)
 akm wiki search research "attention" # Scoped search (equivalent to --type wiki --wiki research)
 akm wiki stash research ./paper.md # Copy source into raw/<slug>.md (never overwrites)
+akm wiki stash research https://example.com/paper # Fetch one URL into raw/<slug>.md
 echo "..." | akm wiki stash research - # stdin form
 akm wiki lint research # Structural checks: orphans, broken xrefs, uncited raws, stale index
 akm wiki ingest research # Print the ingest workflow for this wiki (no action)
@@ -285,8 +287,9 @@ akm config path --all # Show all config paths

 ```sh
 akm init # Initialize working stash
-akm index # Rebuild search index
-akm index --full # Full reindex
+akm index # Rebuild search index (no LLM enrichment)
+akm index --full # Full reindex (no LLM enrichment)
+akm index --enrich # Reindex with LLM inference/enrichment passes
 akm list # List all sources
 akm upgrade # Upgrade akm using its install method
 akm upgrade --check # Check for updates
package/dist/output/renderers.js
CHANGED
@@ -13,7 +13,7 @@ import { hasErrnoCode } from "../core/common";
 import { parseFrontmatter, toStringOrUndefined } from "../core/frontmatter";
 import { extractFrontmatterOnly, extractLineRange, extractSection, formatToc, parseMarkdownToc, } from "../core/markdown";
 import { registerRenderer } from "../indexer/file-context";
-import {
+import { extractCommentMetadata, extractDescriptionFromComments } from "../indexer/metadata";
 import { buildWorkflowAction, workflowMdRenderer } from "../workflows/renderer";
 // ── Interpreter auto-detection map ───────────────────────────────────────────
 const INTERPRETER_MAP = {
@@ -49,36 +49,12 @@ const SETUP_SIGNALS = {
  * `@run <value>`, `@setup <value>`, or `@cwd <value>`.
  */
 export function extractCommentTags(filePath) {
-
-
-
-
-
-
-    }
-    const lines = content.split(/\r?\n/, 50);
-    const hints = {};
-    for (const line of lines) {
-        const trimmed = line.trim();
-        // Match lines starting with comment markers: //, #, /*, *, ;, --
-        if (!/^(?:\/\/|#|\/?\*|;|--)/.test(trimmed) && !trimmed.startsWith("'"))
-            continue;
-        // Strip comment prefix
-        const cleaned = trimmed
-            .replace(/^(?:\/\/|##?|\/?\*\*?\/?|;|--)\s*/, "")
-            .replace(/\*\/\s*$/, "")
-            .trim();
-        const runMatch = cleaned.match(/^@run\s+(.+)/);
-        if (runMatch)
-            hints.run = runMatch[1].trim();
-        const setupMatch = cleaned.match(/^@setup\s+(.+)/);
-        if (setupMatch)
-            hints.setup = setupMatch[1].trim();
-        const cwdMatch = cleaned.match(/^@cwd\s+(.+)/);
-        if (cwdMatch)
-            hints.cwd = cwdMatch[1].trim();
-    }
-    return hints;
+    const metadata = extractCommentMetadata(filePath);
+    return {
+        run: metadata?.run,
+        setup: metadata?.setup,
+        cwd: metadata?.cwd,
+    };
 }
 // ── Auto-detection ───────────────────────────────────────────────────────────
 /**
@@ -118,9 +94,9 @@ export function detectExecHints(filePath) {
  * Resolve execution hints for a script asset.
  *
  * Resolution order (first non-empty value wins for each field):
- * 1.
- * 2. Script file header comments (`@run`/`@setup`/`@cwd`)
- * 3. Auto-detection from extension + dependency files
+ * 1. Indexed entry metadata (`run`/`setup`/`cwd`) when supplied by the caller
+ * 2. Script file header comments (`@run`/`@setup`/`@cwd`)
+ * 3. Auto-detection from extension + dependency files
  */
 export function resolveExecHints(stashEntry, filePath) {
     const stashHints = {
@@ -152,17 +128,6 @@ function deriveName(ctx) {
     return ext ? ctx.relPath.slice(0, -ext.length) : ctx.relPath;
 }
 export { buildWorkflowAction };
-/**
- * Load the matching StashEntry for a file path from the directory's .stash.json.
- */
-function findStashEntryForFile(filePath) {
-    const dir = path.dirname(filePath);
-    const stashFile = loadStashFile(dir);
-    if (!stashFile)
-        return undefined;
-    const fileName = path.basename(filePath);
-    return stashFile.entries.find((e) => e.filename === fileName);
-}
 function extractParameters(template) {
     const parameters = [];
     if (/\$ARGUMENTS\b/i.test(template)) {
@@ -182,18 +147,26 @@ function extractParameters(template) {
     }
     return parameters.length > 0 ? parameters : undefined;
 }
+function readFrontmatterTags(value) {
+    if (!Array.isArray(value))
+        return undefined;
+    const tags = value.filter((tag) => typeof tag === "string" && tag.trim().length > 0);
+    return tags.length > 0 ? tags : undefined;
+}
 // ── 1. skill-md ──────────────────────────────────────────────────────────────
 const skillMdRenderer = {
     name: "skill-md",
     buildShowResponse(ctx) {
         const name = deriveName(ctx);
         const parsed = parseFrontmatter(ctx.content());
+        const tags = readFrontmatterTags(parsed.data.tags);
         return {
             type: "skill",
             name,
             path: ctx.absPath,
             action: "Read and follow the instructions below",
             description: toStringOrUndefined(parsed.data.description),
+            ...(tags ? { tags } : {}),
             content: parsed.content,
         };
     },
@@ -205,12 +178,14 @@ const commandMdRenderer = {
         const name = deriveName(ctx);
         const parsedMd = parseFrontmatter(ctx.content());
         const template = parsedMd.content;
+        const tags = readFrontmatterTags(parsedMd.data.tags);
         return {
             type: "command",
             name,
             path: ctx.absPath,
             action: "Fill $ARGUMENTS placeholders in the template, then dispatch",
             description: toStringOrUndefined(parsedMd.data.description),
+            ...(tags ? { tags } : {}),
             template,
             modelHint: typeof parsedMd.data.model === "string" ? parsedMd.data.model : undefined,
             agent: toStringOrUndefined(parsedMd.data.agent),
@@ -527,8 +502,7 @@ const scriptSourceRenderer = {
         const ext = path.extname(ctx.absPath).toLowerCase();
         // For extensions with a known interpreter, show exec hints
         if (INTERPRETER_MAP[ext]) {
-            const
-            const hints = resolveExecHints(stashEntry, ctx.absPath);
+            const hints = resolveExecHints(undefined, ctx.absPath);
             if (hints.run) {
                 return {
                     type: "script",
@@ -555,8 +529,7 @@ const scriptSourceRenderer = {
         if (!INTERPRETER_MAP[ext])
             return;
         try {
-            const
-            const hints = resolveExecHints(stashEntry, hit.path);
+            const hints = resolveExecHints(undefined, hit.path);
             hit.run = hints.run;
         }
         catch (error) {
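For context, the filtering rules of the new `readFrontmatterTags` helper, shown as illustrative calls (the helper itself is private to renderers.js):

```js
// Only non-empty strings survive; anything else collapses to undefined.
readFrontmatterTags(["deploy", "  ", 42, "ops"]); // → ["deploy", "ops"]
readFrontmatterTags([]);                          // → undefined (empty arrays collapse)
readFrontmatterTags("deploy");                    // → undefined (non-arrays are ignored)
```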
@@ -245,27 +245,22 @@ async function enumerateAssets(stashRoot) {
     }
     const entries = [];
     for (const [dirPath, files] of dirGroups) {
-
-
-
-
-
-
-
-
-
-        }
-        }
-        else {
-            const generated = await generateMetadataFlat(stashRoot, files);
-            if (generated.entries.length === 0)
-                continue;
-            stash = generated;
-        }
-        entries.push(...stash.entries.map((entry) => attachFileSize(dirPath, entry)));
+        const generated = await generateMetadataFlat(stashRoot, files);
+        const legacyOverrides = loadStashFile(dirPath, { requireFilename: true });
+        const mergedEntries = legacyOverrides
+            ? generated.entries.map((entry) => mergeLegacyEntry(entry, legacyOverrides.entries))
+            : generated.entries;
+        const stash = mergedEntries.length > 0 ? { entries: mergedEntries } : legacyOverrides;
+        if (!stash || stash.entries.length === 0)
+            continue;
+        entries.push(...stash.entries.filter((entry) => !!entry.filename).map((entry) => attachFileSize(dirPath, entry)));
     }
     return entries.sort((a, b) => `${a.type}:${a.name}`.localeCompare(`${b.type}:${b.name}`));
 }
+function mergeLegacyEntry(entry, legacyEntries) {
+    const legacy = legacyEntries.find((candidate) => candidate.filename === entry.filename);
+    return legacy ? { ...entry, ...legacy, filename: entry.filename } : entry;
+}
 function attachFileSize(dirPath, entry) {
     if (typeof entry.fileSize === "number" || !entry.filename)
         return entry;
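To make the merge precedence concrete, an illustrative call to the new `mergeLegacyEntry` helper with made-up entries (the function is internal to the changed module):

```js
const generated = { filename: "deploy.sh", name: "deploy", type: "script", description: "Auto-generated" };
const legacy = [{ filename: "deploy.sh", description: "Blue/green deploy helper", run: "bash deploy.sh" }];

mergeLegacyEntry(generated, legacy);
// → { filename: "deploy.sh", name: "deploy", type: "script",
//     description: "Blue/green deploy helper", run: "bash deploy.sh" }
// Legacy fields win, but the filename always comes from the generated entry.
```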