akm-cli 0.4.1 → 0.5.0-rc2

package/dist/setup.js CHANGED
@@ -14,6 +14,7 @@ import { detectAgentPlatforms, detectOllama, detectOpenViking } from "./detect";
  import { checkEmbeddingAvailability, DEFAULT_LOCAL_MODEL, isTransformersAvailable } from "./embedder";
  import { akmIndex } from "./indexer";
  import { akmInit } from "./init";
+ import { probeLlmCapabilities } from "./llm";
  import { getDefaultStashDir } from "./paths";
  import { clearSemanticStatus, deriveSemanticProviderFingerprint, writeSemanticStatus } from "./semantic-status";
  // ── Constants ───────────────────────────────────────────────────────────────
@@ -236,8 +237,8 @@ async function stepOllama(current) {
  spin.stop("Ollama not detected");
  p.log.info("Ollama is not running. Embeddings will use the built-in local model.\n" +
  "To use Ollama later, install it from https://ollama.com and re-run `akm setup`.");
- // Preserve existing embedding/LLM config when Ollama is not available
- return { embedding: current.embedding, llm: current.llm };
+ // Preserve existing embedding config when Ollama is not available
+ return { embedding: current.embedding };
  }
  spin.stop(`Ollama detected at ${ollama.endpoint}`);
  if (ollama.models.length > 0) {
@@ -300,44 +301,162 @@ async function stepOllama(current) {
  };
  }
  // else: undefined → use built-in local
- // LLM model selection
- const chatModels = ollama.models.filter((m) => !embeddingModels.includes(m));
- const allLlmCandidates = chatModels.length > 0 ? chatModels : ollama.models;
- let llm;
- const llmOptions = [];
- for (const m of allLlmCandidates) {
- llmOptions.push({ value: m, label: m, hint: "Ollama" });
- }
- llmOptions.push({
- value: "none",
- label: "Skip LLM enhancement",
- hint: "use heuristic metadata",
- });
+ // Surface Ollama details to the LLM step so it can offer Ollama as a preset.
+ const ollamaChatModels = ollama.models.filter((m) => !embeddingModels.includes(m));
+ return { embedding, ollamaEndpoint: ollama.endpoint, ollamaChatModels };
+ }
+ const LLM_PRESETS = [
+ {
+ value: "anthropic",
+ label: "Anthropic Claude (OpenAI SDK compat beta)",
+ endpoint: "https://api.anthropic.com/v1/chat/completions",
+ defaultModel: "claude-sonnet-4-5",
+ hint: "beta OpenAI-compat layer; set AKM_LLM_API_KEY; override the model if the default is unavailable",
+ contextWindow: 200_000,
+ },
+ {
+ value: "openai",
+ label: "OpenAI",
+ endpoint: "https://api.openai.com/v1/chat/completions",
+ defaultModel: "gpt-4o-mini",
+ hint: "AKM_LLM_API_KEY required",
+ contextWindow: 128_000,
+ },
+ {
+ value: "google",
+ label: "Google Gemini (OpenAI-compat)",
+ endpoint: "https://generativelanguage.googleapis.com/v1beta/openai/chat/completions",
+ defaultModel: "gemini-2.0-flash",
+ hint: "OpenAI-compat endpoint, AKM_LLM_API_KEY required",
+ contextWindow: 1_000_000,
+ },
+ ];
+ /**
+ * Step 3a: pick an LLM provider. Used for indexing-time metadata enhancement.
+ *
+ * @internal Exported for testing only.
+ */
+ export async function stepLlm(current, ollamaEndpoint, ollamaChatModels) {
+ const options = LLM_PRESETS.map((preset) => ({
+ value: preset.value,
+ label: preset.label,
+ hint: preset.hint,
+ }));
+ const ollamaAvailable = Boolean(ollamaEndpoint && ollamaChatModels && ollamaChatModels.length > 0);
+ if (ollamaAvailable) {
+ options.push({
+ value: "ollama",
+ label: "Ollama (local)",
+ hint: ollamaChatModels?.[0] ?? "local",
+ });
+ }
+ options.push({ value: "custom", label: "Custom OpenAI-compatible endpoint" });
+ options.push({ value: "none", label: "Skip LLM", hint: "no metadata enhancement during indexing" });
  if (current.llm) {
- llmOptions.push({
+ options.push({
  value: "keep",
  label: `Keep current: ${current.llm.provider ?? current.llm.endpoint}`,
  hint: current.llm.model,
  });
  }
- const llmChoice = await prompt(() => p.select({
- message: "Use an LLM for richer metadata during indexing?",
- options: llmOptions,
- initialValue: allLlmCandidates.length > 0 ? allLlmCandidates[0] : "none",
+ const initialValue = current.llm ? "keep" : ollamaAvailable ? "ollama" : (LLM_PRESETS[0]?.value ?? "none");
+ const choice = await prompt(() => p.select({
+ message: "Configure an LLM for richer metadata during indexing:",
+ options,
+ initialValue,
  }));
- if (llmChoice === "keep") {
- llm = current.llm;
- }
- else if (llmChoice !== "none") {
+ if (choice === "keep")
+ return current.llm;
+ if (choice === "none")
+ return undefined;
+ let llm;
+ if (choice === "ollama") {
+ const modelChoice = await prompt(() => p.select({
+ message: "Which Ollama model?",
+ options: (ollamaChatModels ?? []).map((m) => ({ value: m, label: m })),
+ initialValue: ollamaChatModels?.[0],
+ }));
  llm = {
  provider: "ollama",
- endpoint: `${ollama.endpoint}/v1/chat/completions`,
- model: llmChoice,
+ endpoint: `${ollamaEndpoint}/v1/chat/completions`,
+ model: modelChoice,
+ temperature: 0.3,
+ maxTokens: 1024,
+ };
+ }
+ else if (choice === "custom") {
+ const endpoint = await prompt(() => p.text({
+ message: "OpenAI-compatible chat completions endpoint:",
+ placeholder: "https://your-host/v1/chat/completions",
+ validate: (v) => {
+ if (!v?.trim())
+ return "Endpoint cannot be empty";
+ if (!v.startsWith("http://") && !v.startsWith("https://"))
+ return "Endpoint must start with http:// or https://";
+ },
+ }));
+ const model = await prompt(() => p.text({
+ message: "Model name:",
+ placeholder: "gpt-4o-mini",
+ validate: (v) => {
+ if (!v?.trim())
+ return "Model name cannot be empty";
+ },
+ }));
+ llm = {
+ provider: "custom",
+ endpoint: endpoint.trim(),
+ model: model.trim(),
+ temperature: 0.3,
+ maxTokens: 1024,
+ };
+ }
+ else {
+ const preset = LLM_PRESETS.find((p) => p.value === choice);
+ if (!preset)
+ return undefined;
+ const model = await prompt(() => p.text({
+ message: `Model for ${preset.label}:`,
+ placeholder: preset.defaultModel,
+ defaultValue: preset.defaultModel,
+ validate: (v) => {
+ if (!v?.trim())
+ return "Model name cannot be empty";
+ },
+ }));
+ llm = {
+ provider: preset.value,
+ endpoint: preset.endpoint,
+ model: model.trim() || preset.defaultModel,
  temperature: 0.3,
- maxTokens: 512,
+ maxTokens: 1024,
+ contextWindow: preset.contextWindow,
  };
  }
- return { embedding, llm };
+ // Remind the user about API key placement. We do not offer a "store in config"
+ // option because saveConfig() strips apiKey fields before writing — persisting
+ // secrets would need an encrypted/secure store that we don't ship.
+ const needsKey = llm.provider !== "ollama" && !llm.endpoint.includes("localhost");
+ if (needsKey && !process.env.AKM_LLM_API_KEY) {
+ p.log.info("This provider requires an API key. Set AKM_LLM_API_KEY in your shell (e.g. `export AKM_LLM_API_KEY=...`) before running `akm index`.");
+ }
+ // Capability probe — best-effort, never blocks setup.
+ const probeSpin = p.spinner();
+ probeSpin.start("Probing LLM (structured-output round-trip)...");
+ const probe = await probeLlmCapabilities(llm);
+ if (probe.reachable && probe.structuredOutput) {
+ probeSpin.stop("LLM reachable; structured output verified.");
+ llm.capabilities = { ...(llm.capabilities ?? {}), structuredOutput: true };
+ }
+ else if (probe.reachable) {
+ probeSpin.stop("LLM reachable but structured-output probe failed.");
+ llm.capabilities = { ...(llm.capabilities ?? {}), structuredOutput: false };
+ }
+ else {
+ probeSpin.stop("LLM not reachable.");
+ p.log.warn(`Could not reach the LLM endpoint${probe.error ? ` (${probe.error})` : ""}. Configuration was saved; verify your endpoint and API key, then retry.`);
+ }
+ return llm;
  }
  async function stepRegistries(current) {
  const defaults = DEFAULT_CONFIG.registries ?? [];
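
Note: `probeLlmCapabilities` is imported from ./llm, but its body is not part of this diff. Judging from how stepLlm consumes the result ({ reachable, structuredOutput, error }), the probe is presumably a single OpenAI-compatible chat-completions round trip that asks for JSON output. The sketch below is an inferred approximation under those assumptions, not the shipped implementation.

    // Hedged sketch of the probe consumed above, inferred from its call site.
    // Assumes Node 18+ global fetch and an OpenAI-compatible endpoint.
    export async function probeLlmCapabilities(llm) {
        const headers = { "content-type": "application/json" };
        if (process.env.AKM_LLM_API_KEY)
            headers.authorization = `Bearer ${process.env.AKM_LLM_API_KEY}`;
        let res;
        try {
            res = await fetch(llm.endpoint, {
                method: "POST",
                headers,
                body: JSON.stringify({
                    model: llm.model,
                    messages: [{ role: "user", content: 'Reply with the JSON object {"ok":true}.' }],
                    response_format: { type: "json_object" }, // the structured-output half of the probe
                    max_tokens: 32,
                }),
            });
        }
        catch (err) {
            // Network failure: the endpoint was never reached.
            return { reachable: false, structuredOutput: false, error: String(err?.message ?? err) };
        }
        if (!res.ok)
            return { reachable: true, structuredOutput: false, error: `HTTP ${res.status}` };
        try {
            const body = await res.json();
            JSON.parse(body.choices?.[0]?.message?.content ?? ""); // throws on non-JSON replies
            return { reachable: true, structuredOutput: true };
        }
        catch {
            return { reachable: true, structuredOutput: false };
        }
    }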
@@ -402,7 +521,7 @@ export async function stepStashSources(current) {
  for (const url of selectedRepos) {
  if (!existingUrls.has(url)) {
  const rec = RECOMMENDED_GITHUB_REPOS.find((r) => r.url === url);
- stashes.push({ type: "github", url, name: rec?.name });
+ stashes.push({ type: "git", url, name: rec?.name });
  existingUrls.add(url);
  }
  }
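
This rename, also applied in the manual-entry hunk below, means GitHub-backed stash sources written by 0.5.0 serialize with type "git" rather than "github"; no migration of existing "github" entries is visible in this diff. Illustratively (url/name values are examples):

    // stashes[] entry as 0.4.1 wrote it for a recommended repo:
    //   { type: "github", url: "https://github.com/owner/repo", name: "repo" }
    // the same entry as 0.5.0-rc2 writes it:
    //   { type: "git", url: "https://github.com/owner/repo", name: "repo" }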
@@ -489,7 +608,7 @@ export async function stepStashSources(current) {
  }));
  if (name === null)
  continue;
- const entry = { type: "github", url: url.trim() };
+ const entry = { type: "git", url: url.trim() };
  if (name.trim())
  entry.name = name.trim();
  if (!stashes.some((s) => s.url === entry.url)) {
@@ -579,9 +698,15 @@ export async function runSetupWizard() {
  p.log.warn("No network connectivity detected. Skipping Ollama detection and remote embedding checks.\n" +
  "Local-only setup will continue. Re-run `akm setup` when online for full configuration.");
  }
- // Step 2: Ollama / Embedding / LLM
- p.log.step("Step 2: Embedding & LLM");
- const { embedding, llm } = online ? await stepOllama(current) : { embedding: current.embedding, llm: current.llm };
+ // Step 2: Embedding (Ollama detection drives the embedding choice + surfaces
+ // the Ollama endpoint to the LLM step that follows).
+ p.log.step("Step 2: Embedding");
+ const { embedding, ollamaEndpoint, ollamaChatModels } = online
+ ? await stepOllama(current)
+ : { embedding: current.embedding };
+ // Step 2b: LLM provider — Anthropic / OpenAI / Gemini / Ollama / custom.
+ p.log.step("Step 2b: LLM Provider");
+ const llm = online ? await stepLlm(current, ollamaEndpoint, ollamaChatModels) : current.llm;
  // Step 3: Semantic search assets
  p.log.step("Step 3: Semantic Search");
  const semanticSearchMode = await stepSemanticSearch(current, embedding);
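
Taken together, a run of the new Step 2b against the OpenAI preset with a passing probe yields an llm object like the one below. Values are read off LLM_PRESETS and stepLlm above; where this object lands in the saved config file is outside this diff.

    // Shape returned by stepLlm() for the "openai" preset after a successful probe.
    const llm = {
        provider: "openai",
        endpoint: "https://api.openai.com/v1/chat/completions",
        model: "gpt-4o-mini",
        temperature: 0.3,
        maxTokens: 1024,
        contextWindow: 128_000,
        capabilities: { structuredOutput: true },
        // No apiKey field: per the comment in stepLlm, saveConfig() strips apiKey
        // before writing, so the key must come from AKM_LLM_API_KEY at index time.
    };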
package/dist/stash-add.js CHANGED
@@ -8,32 +8,50 @@ import { upsertLockEntry } from "./lockfile";
  import { detectStashRoot, installRegistryRef, upsertInstalledRegistryEntry } from "./registry-install";
  import { parseRegistryRef } from "./registry-resolve";
  import { ensureWebsiteMirror, validateWebsiteInputUrl } from "./stash-providers/website";
+ import { warn } from "./warn";
+ import { validateWikiName } from "./wiki";
+ const VALID_OVERRIDE_TYPES = new Set(["wiki"]);
  export async function akmAdd(input) {
  const ref = input.ref.trim();
  if (!ref)
  throw new UsageError("Install ref or local directory is required. " +
  "Examples: `akm add @scope/kit`, `akm add github:owner/repo`, `akm add ./local/path`");
+ // Validate and resolve wiki name when --type wiki is used
+ let wikiName;
+ if (input.overrideType) {
+ if (!VALID_OVERRIDE_TYPES.has(input.overrideType)) {
+ throw new UsageError(`Invalid --type value: "${input.overrideType}". Supported types: ${[...VALID_OVERRIDE_TYPES].join(", ")}`);
+ }
+ if (input.overrideType === "wiki") {
+ const derived = input.name ?? deriveWikiNameFromRef(ref);
+ validateWikiName(derived);
+ wikiName = derived;
+ }
+ }
  const stashDir = resolveStashDir();
  if (shouldAddAsWebsiteUrl(ref)) {
- return addWebsiteStashSource(ref, stashDir, input.name, input.options);
+ return addWebsiteStashSource(ref, stashDir, input.name ?? wikiName, input.options, wikiName);
  }
  // Detect local directory refs and route them to stashes[] instead of installed[]
  try {
  const parsed = parseRegistryRef(ref);
  if (parsed.source === "local") {
- return addLocalStashSource(ref, parsed.sourcePath, stashDir);
+ if (input.trustThisInstall) {
+ warn("--trust has no effect on local directory sources; the install audit is not run for local paths.");
+ }
+ return addLocalStashSource(ref, parsed.sourcePath, stashDir, wikiName);
  }
  }
  catch {
  // Not a local ref — fall through to registry install
  }
- return addRegistryKit(ref, stashDir);
+ return addRegistryKit(ref, stashDir, input.trustThisInstall, input.writable, wikiName);
  }
  /**
  * Add a local directory as a filesystem stash source.
  * Creates a stashes[] entry instead of an installed[] entry.
  */
- async function addLocalStashSource(ref, sourcePath, stashDir) {
+ async function addLocalStashSource(ref, sourcePath, stashDir, wikiName) {
  const stashRoot = detectStashRoot(sourcePath);
  const resolvedPath = path.resolve(stashRoot);
  const config = loadUserConfig();
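
For orientation, the new akmAdd() input fields introduced here are overrideType, trustThisInstall, and writable. The `--type` and `--trust` flag spellings are quoted in the messages above; the flag behind `writable` is not shown in this diff, so treat that mapping as an assumption. A programmatic call would look roughly like:

    // Hedged usage sketch of the expanded input shape read by akmAdd() above.
    await akmAdd({
        ref: "github:owner/team-wiki",
        overrideType: "wiki", // must be in VALID_OVERRIDE_TYPES; currently only "wiki"
        // name omitted: deriveWikiNameFromRef("github:owner/team-wiki") -> "team-wiki"
        trustThisInstall: true, // forwarded to installRegistryRef(); ties into the install audit per the warn() above
        writable: true, // forwarded to installRegistryRef() and echoed back as installed.writable
    });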
@@ -44,11 +62,16 @@ async function addLocalStashSource(ref, sourcePath, stashDir) {
  const entry = {
  type: "filesystem",
  path: resolvedPath,
- name: toReadableId(resolvedPath),
+ name: wikiName ?? toReadableId(resolvedPath),
+ ...(wikiName ? { wikiName } : {}),
  };
  stashes.push(entry);
  saveConfig({ ...config, stashes });
  }
+ else if (wikiName && existing.wikiName !== wikiName) {
+ existing.wikiName = wikiName;
+ saveConfig({ ...config, stashes });
+ }
  const index = await akmIndex({ stashDir });
  const updatedConfig = loadConfig();
  return {
@@ -73,7 +96,7 @@ async function addLocalStashSource(ref, sourcePath, stashDir) {
  },
  };
  }
- async function addWebsiteStashSource(ref, stashDir, name, options) {
+ async function addWebsiteStashSource(ref, stashDir, name, options, wikiName) {
  const normalizedUrl = validateWebsiteInputUrl(ref);
  const config = loadUserConfig();
  const stashes = [...(config.stashes ?? [])];
@@ -84,13 +107,23 @@ async function addWebsiteStashSource(ref, stashDir, name, options) {
  url: normalizedUrl,
  name: name ?? toWebsiteName(normalizedUrl),
  ...(options && Object.keys(options).length > 0 ? { options } : {}),
+ ...(wikiName ? { wikiName } : {}),
  };
  stashes.push(entry);
  saveConfig({ ...config, stashes });
  }
- else if (options && Object.keys(options).length > 0) {
- entry.options = { ...entry.options, ...options };
- saveConfig({ ...config, stashes });
+ else {
+ let changed = false;
+ if (options && Object.keys(options).length > 0) {
+ entry.options = { ...entry.options, ...options };
+ changed = true;
+ }
+ if (wikiName && entry.wikiName !== wikiName) {
+ entry.wikiName = wikiName;
+ changed = true;
+ }
+ if (changed)
+ saveConfig({ ...config, stashes });
  }
  const cachePaths = await ensureWebsiteMirror(entry, { requireStashDir: true });
  const index = await akmIndex({ stashDir });
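
Re-adding an existing website source is now a merge rather than an options-only update: crawl options are shallow-merged, a new wikiName is recorded, and saveConfig() runs only when something actually changed. Illustratively (the option key below is hypothetical, and the URL is assumed to route through shouldAddAsWebsiteUrl):

    // First call registers the source; the second only updates entry.wikiName.
    await akmAdd({ ref: "https://docs.example.com", options: { maxDepth: 2 } }); // hypothetical option key
    await akmAdd({ ref: "https://docs.example.com", overrideType: "wiki" });     // merges wikiName, keeps options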
@@ -120,8 +153,8 @@ async function addWebsiteStashSource(ref, stashDir, name, options) {
  /**
  * Install a kit from a registry (npm, github, git).
  */
- async function addRegistryKit(ref, stashDir) {
- const installed = await installRegistryRef(ref);
+ async function addRegistryKit(ref, stashDir, trustThisInstall, writable, wikiName) {
+ const installed = await installRegistryRef(ref, { trustThisInstall, writable });
  const replaced = (loadConfig().installed ?? []).find((entry) => entry.id === installed.id);
  const config = upsertInstalledRegistryEntry({
  id: installed.id,
@@ -133,6 +166,8 @@ async function addRegistryKit(ref, stashDir) {
  stashRoot: installed.stashRoot,
  cacheDir: installed.cacheDir,
  installedAt: installed.installedAt,
+ writable: installed.writable,
+ ...(wikiName ? { wikiName } : {}),
  });
  await upsertLockEntry({
  id: installed.id,
@@ -211,3 +246,41 @@ function toWebsiteName(siteUrl) {
  return siteUrl;
  }
  }
+ /**
+ * Derive a wiki name from a ref string when --name is not provided.
+ * Lowercases and slugifies the most meaningful identifier segment.
+ */
+ export function deriveWikiNameFromRef(ref) {
+ let candidate = ref;
+ // github:owner/repo or github:owner/repo@ref
+ if (/^github:/i.test(ref)) {
+ const repoPath = ref.replace(/^github:/i, "").split("@")[0];
+ candidate = repoPath.split("/").pop() ?? repoPath;
+ }
+ // npm:pkg or @scope/pkg
+ else if (/^npm:/i.test(ref) || ref.startsWith("@")) {
+ candidate = ref
+ .replace(/^npm:/i, "")
+ .replace(/^@[^/]+\//, "")
+ .split("@")[0];
+ }
+ // git URLs or HTTPS git URLs
+ else if (/^(git:|https?:\/\/)/.test(ref)) {
+ try {
+ candidate = new URL(ref).pathname.split("/").pop() ?? candidate;
+ }
+ catch {
+ candidate = ref.split("/").pop() ?? ref;
+ }
+ candidate = candidate.replace(/\.git$/, "");
+ }
+ // Local paths
+ else {
+ candidate = path.basename(ref.replace(/\/+$/, ""));
+ }
+ return candidate
+ .toLowerCase()
+ .replace(/[^a-z0-9]+/g, "-")
+ .replace(/^-+|-+$/g, "")
+ .slice(0, 64);
+ }
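
Tracing the branches above, the slugging behaves as follows for representative refs (inputs are illustrative; outputs follow directly from the code):

    deriveWikiNameFromRef("github:owner/My-Repo@v2");        // -> "my-repo"
    deriveWikiNameFromRef("npm:@scope/my.pkg");               // -> "my-pkg"
    deriveWikiNameFromRef("https://host.dev/owner/repo.git"); // -> "repo"
    deriveWikiNameFromRef("./local/My Docs/");                // -> "my-docs"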