knowns 0.10.6 → 0.11.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (78)
  1. package/README.md +69 -3
  2. package/dist/index.js +2254 -447
  3. package/dist/mcp/server.js +791 -98
  4. package/dist/ui/assets/_baseUniq-BnxjK44c.js +1 -0
  5. package/dist/ui/assets/arc-DZvVmMpH.js +1 -0
  6. package/dist/ui/assets/architectureDiagram-VXUJARFQ-BZL6VCKH.js +36 -0
  7. package/dist/ui/assets/blockDiagram-VD42YOAC-ClTPsT-I.js +122 -0
  8. package/dist/ui/assets/c4Diagram-YG6GDRKO-Bw103idR.js +10 -0
  9. package/dist/ui/assets/channel-D9x35499.js +1 -0
  10. package/dist/ui/assets/chunk-4BX2VUAB-DypK8CJ4.js +1 -0
  11. package/dist/ui/assets/chunk-55IACEB6-BBDdrBL9.js +1 -0
  12. package/dist/ui/assets/chunk-B4BG7PRW-K5LInW1J.js +165 -0
  13. package/dist/ui/assets/chunk-DI55MBZ5-DnPeq72k.js +220 -0
  14. package/dist/ui/assets/chunk-FMBD7UC4-BouMIneX.js +15 -0
  15. package/dist/ui/assets/chunk-QN33PNHL-Ba1GHx9G.js +1 -0
  16. package/dist/ui/assets/chunk-QZHKN3VN-DR78GQki.js +1 -0
  17. package/dist/ui/assets/chunk-TZMSLE5B-DS5ZUS0q.js +1 -0
  18. package/dist/ui/assets/classDiagram-2ON5EDUG-BWeu8ZEl.js +1 -0
  19. package/dist/ui/assets/classDiagram-v2-WZHVMYZB-BWeu8ZEl.js +1 -0
  20. package/dist/ui/assets/clone-CojkIg2_.js +1 -0
  21. package/dist/ui/assets/cose-bilkent-S5V4N54A-d4bDKFYg.js +1 -0
  22. package/dist/ui/assets/cytoscape.esm-5J0xJHOV.js +321 -0
  23. package/dist/ui/assets/dagre-6UL2VRFP-ehZmQh8m.js +4 -0
  24. package/dist/ui/assets/defaultLocale-DX6XiGOO.js +1 -0
  25. package/dist/ui/assets/diagram-PSM6KHXK-BHu3Ht-9.js +24 -0
  26. package/dist/ui/assets/diagram-QEK2KX5R-B-FuS1Ks.js +43 -0
  27. package/dist/ui/assets/diagram-S2PKOQOG-9mYWKwpl.js +24 -0
  28. package/dist/ui/assets/erDiagram-Q2GNP2WA-DyH-5pPP.js +60 -0
  29. package/dist/ui/assets/flowDiagram-NV44I4VS-nG1wIf-7.js +162 -0
  30. package/dist/ui/assets/ganttDiagram-JELNMOA3-Dme9C52c.js +267 -0
  31. package/dist/ui/assets/gitGraphDiagram-NY62KEGX-Ler4P2Pa.js +65 -0
  32. package/dist/ui/assets/graph-CVs9_WqW.js +1 -0
  33. package/dist/ui/assets/index-BDuGtZOD.css +1 -0
  34. package/dist/ui/assets/index-BfOQwuot.js +424 -0
  35. package/dist/ui/assets/infoDiagram-WHAUD3N6-B_M3sR5-.js +2 -0
  36. package/dist/ui/assets/init-Gi6I4Gst.js +1 -0
  37. package/dist/ui/assets/journeyDiagram-XKPGCS4Q-BSLvczLS.js +139 -0
  38. package/dist/ui/assets/kanban-definition-3W4ZIXB7-DeqtFXdg.js +89 -0
  39. package/dist/ui/assets/katex-DhXJpUyf.js +261 -0
  40. package/dist/ui/assets/layout-D33h6Hj3.js +1 -0
  41. package/dist/ui/assets/linear-BnzJ09ng.js +1 -0
  42. package/dist/ui/assets/min-CrKfxF3L.js +1 -0
  43. package/dist/ui/assets/mindmap-definition-VGOIOE7T-ClMK1T7-.js +68 -0
  44. package/dist/ui/assets/ordinal-Cboi1Yqb.js +1 -0
  45. package/dist/ui/assets/pieDiagram-ADFJNKIX-t0ZVgZgx.js +30 -0
  46. package/dist/ui/assets/quadrantDiagram-AYHSOK5B-DpcRGPXU.js +7 -0
  47. package/dist/ui/assets/requirementDiagram-UZGBJVZJ-BRd7le_X.js +64 -0
  48. package/dist/ui/assets/sankeyDiagram-TZEHDZUN-CvzudzQ1.js +10 -0
  49. package/dist/ui/assets/sequenceDiagram-WL72ISMW-vMDZGgni.js +145 -0
  50. package/dist/ui/assets/stateDiagram-FKZM4ZOC-CsKLmSJ0.js +1 -0
  51. package/dist/ui/assets/stateDiagram-v2-4FDKWEC3-CBgoBfXm.js +1 -0
  52. package/dist/ui/assets/timeline-definition-IT6M3QCI-CtNX-O2_.js +61 -0
  53. package/dist/ui/assets/treemap-KMMF4GRG-DH2wEEvC.js +128 -0
  54. package/dist/ui/assets/xychartDiagram-PRI3JC2R-RlXBItY0.js +7 -0
  55. package/dist/ui/index.html +2 -2
  56. package/package.json +2 -4
  57. package/dist/ui/assets/index-2CDomS1a.js +0 -317
  58. package/dist/ui/assets/index-B1mpVDN3.css +0 -1
  59. package/dist/ui/assets/inter-v12-latin-100-46Mq0mOp.woff +0 -0
  60. package/dist/ui/assets/inter-v12-latin-100-BQDzDElq.woff2 +0 -0
  61. package/dist/ui/assets/inter-v12-latin-200-BxfrU12A.woff2 +0 -0
  62. package/dist/ui/assets/inter-v12-latin-200-DXfqWPZg.woff +0 -0
  63. package/dist/ui/assets/inter-v12-latin-300-DEbyFmpd.woff2 +0 -0
  64. package/dist/ui/assets/inter-v12-latin-300-f7r92Nkj.woff +0 -0
  65. package/dist/ui/assets/inter-v12-latin-500-BQ2gQN_M.woff +0 -0
  66. package/dist/ui/assets/inter-v12-latin-500-DfX5FI9E.woff2 +0 -0
  67. package/dist/ui/assets/inter-v12-latin-600-BvOeHRLc.woff2 +0 -0
  68. package/dist/ui/assets/inter-v12-latin-600-D01NXWOK.woff +0 -0
  69. package/dist/ui/assets/inter-v12-latin-700-B5TOIllR.woff +0 -0
  70. package/dist/ui/assets/inter-v12-latin-700-Bj1B9WKG.woff2 +0 -0
  71. package/dist/ui/assets/inter-v12-latin-800-Bdy4lAMa.woff2 +0 -0
  72. package/dist/ui/assets/inter-v12-latin-800-DFVvDWwT.woff +0 -0
  73. package/dist/ui/assets/inter-v12-latin-900-CMga-52B.woff2 +0 -0
  74. package/dist/ui/assets/inter-v12-latin-900-ORHAl5ZU.woff +0 -0
  75. package/dist/ui/assets/inter-v12-latin-regular-CahmJf_6.woff +0 -0
  76. package/dist/ui/assets/inter-v12-latin-regular-YtgfLPRn.woff2 +0 -0
  77. package/dist/ui/assets/module-RjUF93sV.js +0 -716
  78. package/dist/ui/assets/native-48B9X9Wg.js +0 -1
@@ -10076,10 +10076,10 @@ var require_stringify = __commonJS({
  data = Object.assign({}, file3.data, data);
  const open = opts.delimiters[0];
  const close = opts.delimiters[1];
- const matter7 = engine.stringify(data, options2).trim();
+ const matter8 = engine.stringify(data, options2).trim();
  let buf = "";
- if (matter7 !== "{}") {
- buf = newline(open) + newline(matter7) + newline(close);
+ if (matter8 !== "{}") {
+ buf = newline(open) + newline(matter8) + newline(close);
  }
  if (typeof file3.excerpt === "string" && file3.excerpt !== "") {
  if (str2.indexOf(file3.excerpt.trim()) === -1) {
@@ -10185,19 +10185,19 @@ var require_gray_matter = __commonJS({
  var toFile = require_to_file();
  var parse4 = require_parse();
  var utils = require_utils2();
- function matter7(input, options2) {
+ function matter8(input, options2) {
  if (input === "") {
  return { data: {}, content: input, excerpt: "", orig: input };
  }
  let file3 = toFile(input);
- const cached2 = matter7.cache[file3.content];
+ const cached2 = matter8.cache[file3.content];
  if (!options2) {
  if (cached2) {
  file3 = Object.assign({}, cached2);
  file3.orig = cached2.orig;
  return file3;
  }
- matter7.cache[file3.content] = file3;
+ matter8.cache[file3.content] = file3;
  }
  return parseMatter(file3, options2);
  }
@@ -10219,7 +10219,7 @@ var require_gray_matter = __commonJS({
  }
  str2 = str2.slice(openLen);
  const len = str2.length;
- const language = matter7.language(str2, opts);
+ const language = matter8.language(str2, opts);
  if (language.name) {
  file3.language = language.name;
  str2 = str2.slice(language.raw.length);
@@ -10254,24 +10254,24 @@ var require_gray_matter = __commonJS({
  }
  return file3;
  }
- matter7.engines = engines2;
- matter7.stringify = function(file3, data, options2) {
- if (typeof file3 === "string") file3 = matter7(file3, options2);
+ matter8.engines = engines2;
+ matter8.stringify = function(file3, data, options2) {
+ if (typeof file3 === "string") file3 = matter8(file3, options2);
  return stringify(file3, data, options2);
  };
- matter7.read = function(filepath, options2) {
+ matter8.read = function(filepath, options2) {
  const str2 = fs.readFileSync(filepath, "utf8");
- const file3 = matter7(str2, options2);
+ const file3 = matter8(str2, options2);
  file3.path = filepath;
  return file3;
  };
- matter7.test = function(str2, options2) {
+ matter8.test = function(str2, options2) {
  return utils.startsWith(str2, defaults2(options2).delimiters[0]);
  };
- matter7.language = function(str2, options2) {
+ matter8.language = function(str2, options2) {
  const opts = defaults2(options2);
  const open = opts.delimiters[0];
- if (matter7.test(str2)) {
+ if (matter8.test(str2)) {
  str2 = str2.slice(open.length);
  }
  const language = str2.slice(0, str2.search(/\r?\n/));
@@ -10280,11 +10280,11 @@ var require_gray_matter = __commonJS({
  name: language ? language.trim() : ""
  };
  };
- matter7.cache = {};
- matter7.clearCache = function() {
- matter7.cache = {};
+ matter8.cache = {};
+ matter8.clearCache = function() {
+ matter8.cache = {};
  };
- module2.exports = matter7;
+ module2.exports = matter8;
  }
  });
 
@@ -18688,7 +18688,7 @@ var require_no_conflict = __commonJS({
  "node_modules/handlebars/dist/cjs/handlebars/no-conflict.js"(exports2, module2) {
  "use strict";
  exports2.__esModule = true;
- exports2["default"] = function(Handlebars2) {
+ exports2["default"] = function(Handlebars3) {
  (function() {
  if (typeof globalThis === "object") return;
  Object.prototype.__defineGetter__("__magic__", function() {
@@ -18698,11 +18698,11 @@ var require_no_conflict = __commonJS({
  delete Object.prototype.__magic__;
  })();
  var $Handlebars = globalThis.Handlebars;
- Handlebars2.noConflict = function() {
- if (globalThis.Handlebars === Handlebars2) {
+ Handlebars3.noConflict = function() {
+ if (globalThis.Handlebars === Handlebars3) {
  globalThis.Handlebars = $Handlebars;
  }
- return Handlebars2;
+ return Handlebars3;
  };
  };
  module2.exports = exports2["default"];
@@ -20622,7 +20622,7 @@ var require_util2 = __commonJS({
  return path2;
  }
  exports2.normalize = normalize2;
- function join21(aRoot, aPath) {
+ function join22(aRoot, aPath) {
  if (aRoot === "") {
  aRoot = ".";
  }
@@ -20654,7 +20654,7 @@ var require_util2 = __commonJS({
  }
  return joined;
  }
- exports2.join = join21;
+ exports2.join = join22;
  exports2.isAbsolute = function(aPath) {
  return aPath.charAt(0) === "/" || urlRegexp.test(aPath);
  };
@@ -20827,7 +20827,7 @@ var require_util2 = __commonJS({
  parsed.path = parsed.path.substring(0, index + 1);
  }
  }
- sourceURL = join21(urlGenerate(parsed), sourceURL);
+ sourceURL = join22(urlGenerate(parsed), sourceURL);
  }
  return normalize2(sourceURL);
  }
@@ -28653,9 +28653,9 @@ var require_prompts3 = __commonJS({
  });

  // src/mcp/server.ts
- import { existsSync as existsSync16 } from "node:fs";
- import { readFile as readFile10 } from "node:fs/promises";
- import { join as join20 } from "node:path";
+ import { existsSync as existsSync17 } from "node:fs";
+ import { readFile as readFile11 } from "node:fs/promises";
+ import { join as join21 } from "node:path";

  // node_modules/zod/v3/helpers/util.js
  var util;
@@ -49715,6 +49715,7 @@ function parseTaskMarkdown(content) {
  assignee: frontmatter.assignee,
  labels: frontmatter.labels || [],
  parent: frontmatter.parent,
+ spec: frontmatter.spec,
  subtasks: [],
  // Will be populated by FileStore
  createdAt: new Date(frontmatter.createdAt),
@@ -49741,6 +49742,7 @@ function serializeTaskMarkdown(task) {
  };
  if (task.assignee) frontmatter.assignee = task.assignee;
  if (task.parent) frontmatter.parent = task.parent;
+ if (task.spec) frontmatter.spec = task.spec;
  let body = "";
  body += `# ${task.title}
 
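Note: the two hunks above add an optional `spec` field to the task frontmatter round-trip. A minimal sketch of the resulting frontmatter object, with invented values (field names taken from the hunks, everything else hypothetical):

```ts
// Hypothetical task frontmatter as serializeTaskMarkdown would emit it (values invented).
const frontmatter = {
  assignee: "@alice",
  labels: ["backend"],
  parent: "task-100",
  spec: "specs/user-auth", // new optional field; only written when task.spec is set
};
```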
@@ -50433,7 +50435,7 @@ function normalizePath(filePath) {
  }

  // src/mcp/server.ts
- var import_gray_matter6 = __toESM(require_gray_matter(), 1);
+ var import_gray_matter7 = __toESM(require_gray_matter(), 1);

  // src/utils/notify-server.ts
  import { existsSync as existsSync2, readFileSync } from "node:fs";
@@ -50818,6 +50820,10 @@ function errorResponse(error48) {
  }

  // src/mcp/handlers/task.ts
+ function normalizeNewlines(text) {
+ if (!text) return text;
+ return text.replace(/\\n/g, "\n");
+ }
  var createTaskSchema = external_exports3.object({
  title: external_exports3.string(),
  description: external_exports3.string().optional(),
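The new `normalizeNewlines` helper above converts literal `\n` escape sequences into real newlines before text is stored. A minimal sketch of the same behavior, assuming a client sent an escaped string through JSON (not part of the package):

```ts
// Mirrors the helper added in the hunk above.
function normalizeNewlines(text?: string): string | undefined {
  if (!text) return text;
  return text.replace(/\\n/g, "\n");
}

// "line one\\nline two" (backslash + n) becomes a real two-line string.
console.log(normalizeNewlines("line one\\nline two"));
```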
@@ -50825,7 +50831,8 @@ var createTaskSchema = external_exports3.object({
  priority: external_exports3.enum(["low", "medium", "high"]).optional(),
  assignee: external_exports3.string().optional(),
  labels: external_exports3.array(external_exports3.string()).optional(),
- parent: external_exports3.string().optional()
+ parent: external_exports3.string().optional(),
+ spec: external_exports3.string().optional()
  });
  var getTaskSchema = external_exports3.object({
  taskId: external_exports3.string()
@@ -50838,6 +50845,8 @@ var updateTaskSchema = external_exports3.object({
  priority: external_exports3.enum(["low", "medium", "high"]).optional(),
  assignee: external_exports3.string().optional(),
  labels: external_exports3.array(external_exports3.string()).optional(),
+ spec: external_exports3.string().nullable().optional(),
+ // Spec document path (null to remove)
  // AC operations
  addAc: external_exports3.array(external_exports3.string()).optional(),
  // Add new acceptance criteria
@@ -50859,7 +50868,8 @@ var listTasksSchema = external_exports3.object({
  status: external_exports3.string().optional(),
  priority: external_exports3.string().optional(),
  assignee: external_exports3.string().optional(),
- label: external_exports3.string().optional()
+ label: external_exports3.string().optional(),
+ spec: external_exports3.string().optional()
  });
  var searchTasksSchema = external_exports3.object({
  query: external_exports3.string()
@@ -50889,7 +50899,8 @@ var taskTools = [
  items: { type: "string" },
  description: "Task labels"
  },
- parent: { type: "string", description: "Parent task ID for subtasks" }
+ parent: { type: "string", description: "Parent task ID for subtasks" },
+ spec: { type: "string", description: "Spec document path (e.g., 'specs/user-auth')" }
  },
  required: ["title"]
  }
@@ -50930,6 +50941,7 @@ var taskTools = [
  items: { type: "string" },
  description: "New labels"
  },
+ spec: { type: "string", description: "Spec document path (set to null to remove)" },
  addAc: {
  type: "array",
  items: { type: "string" },
@@ -50966,7 +50978,8 @@ var taskTools = [
  status: { type: "string", description: "Filter by status" },
  priority: { type: "string", description: "Filter by priority" },
  assignee: { type: "string", description: "Filter by assignee" },
- label: { type: "string", description: "Filter by label" }
+ label: { type: "string", description: "Filter by label" },
+ spec: { type: "string", description: "Filter by spec document path" }
  }
  }
  },
@@ -50986,12 +50999,13 @@ async function handleCreateTask(args, fileStore) {
  const input = createTaskSchema.parse(args);
  const task = await fileStore.createTask({
  title: input.title,
- description: input.description,
+ description: normalizeNewlines(input.description),
  status: input.status || "todo",
  priority: input.priority || "medium",
  assignee: input.assignee,
  labels: input.labels || [],
  parent: input.parent,
+ spec: input.spec,
  subtasks: [],
  acceptanceCriteria: [],
  timeSpent: 0,
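For illustration, a hypothetical `create_task` argument object that exercises both changes above (the `spec` link and the escaped-newline description); names and values are invented:

```ts
// Hypothetical arguments as an MCP client might send them (JSON-compatible object).
const createTaskArgs = {
  title: "Implement login endpoint",
  description: "Part of auth work.\\nSee the spec for details.", // literal \n, normalized on create
  priority: "high",
  labels: ["from-spec"],
  spec: "specs/user-auth", // stored on the task and written to its frontmatter
};
```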
@@ -51024,6 +51038,7 @@ async function handleGetTask(args, fileStore) {
  priority: task.priority,
  assignee: task.assignee,
  labels: task.labels,
+ spec: task.spec,
  acceptanceCriteria: task.acceptanceCriteria,
  implementationPlan: task.implementationPlan,
  implementationNotes: task.implementationNotes,
@@ -51042,11 +51057,14 @@ async function handleUpdateTask(args, fileStore) {
  }
  const updates = {};
  if (input.title) updates.title = input.title;
- if (input.description) updates.description = input.description;
+ if (input.description) updates.description = normalizeNewlines(input.description);
  if (input.status) updates.status = input.status;
  if (input.priority) updates.priority = input.priority;
  if (input.assignee) updates.assignee = input.assignee;
  if (input.labels) updates.labels = input.labels;
+ if (input.spec !== void 0) {
+ updates.spec = input.spec === null ? void 0 : input.spec;
+ }
  const criteria = [...currentTask.acceptanceCriteria];
  let acModified = false;
  if (input.addAc && input.addAc.length > 0) {
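The `spec` update handling above distinguishes three cases. A short sketch of those semantics, following the same `void 0`/`null` convention as the hunk (not part of the package):

```ts
// Sketch of the update semantics for `spec`.
function resolveSpecUpdate(spec: string | null | undefined): { touch: boolean; value?: string } {
  if (spec === undefined) return { touch: false };              // field omitted: leave as-is
  if (spec === null) return { touch: true, value: undefined };  // explicit null: remove the link
  return { touch: true, value: spec };                          // string: set or replace the link
}
```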
@@ -51089,15 +51107,15 @@ async function handleUpdateTask(args, fileStore) {
  updates.acceptanceCriteria = criteria;
  }
  if (input.plan !== void 0) {
- updates.implementationPlan = input.plan;
+ updates.implementationPlan = normalizeNewlines(input.plan);
  }
  if (input.notes !== void 0) {
- updates.implementationNotes = input.notes;
+ updates.implementationNotes = normalizeNewlines(input.notes);
  }
  if (input.appendNotes) {
  const existingNotes = currentTask.implementationNotes || "";
  const separator = existingNotes ? "\n\n" : "";
- updates.implementationNotes = existingNotes + separator + input.appendNotes;
+ updates.implementationNotes = existingNotes + separator + normalizeNewlines(input.appendNotes);
  }
  const task = await fileStore.updateTask(taskId, updates);
  await notifyTaskUpdate(task.id);
@@ -51128,6 +51146,9 @@ async function handleListTasks(args, fileStore) {
  if (input.label) {
  tasks = tasks.filter((t) => t.labels.includes(input.label));
  }
+ if (input.spec) {
+ tasks = tasks.filter((t) => t.spec === input.spec);
+ }
  return successResponse({
  count: tasks.length,
  tasks: tasks.map((t) => ({
@@ -51136,7 +51157,8 @@ async function handleListTasks(args, fileStore) {
  status: t.status,
  priority: t.priority,
  assignee: t.assignee,
- labels: t.labels
+ labels: t.labels,
+ spec: t.spec
  }))
  });
  }
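Together, the two hunks above let `list_tasks` filter by spec and expose `spec` in each summary row. A self-contained sketch of that behavior with invented data (not part of the package):

```ts
// Sketch of the new spec filter and summary field.
type TaskSummary = { id: string; title: string; spec?: string };
const tasks: TaskSummary[] = [
  { id: "task-1", title: "Login endpoint", spec: "specs/user-auth" },
  { id: "task-2", title: "Unrelated chore" },
];
const bySpec = tasks.filter((t) => t.spec === "specs/user-auth");
console.log(bySpec.map((t) => ({ id: t.id, title: t.title, spec: t.spec })));
```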
@@ -51999,8 +52021,8 @@ var NpmProvider = class extends ImportProvider {
  try {
  const packageJsonPath = join12(tempDir, "package.json");
  if (existsSync8(packageJsonPath)) {
- const { readFile: readFile11 } = await import("node:fs/promises");
- const content = await readFile11(packageJsonPath, "utf-8");
+ const { readFile: readFile12 } = await import("node:fs/promises");
+ const content = await readFile12(packageJsonPath, "utf-8");
  const pkg = JSON.parse(content);
  if (pkg.version) {
  metadata.version = pkg.version;
@@ -52409,6 +52431,10 @@ async function validateRefs(projectRoot, content, tasksDir) {
  function getDocsDir() {
  return join15(getProjectRoot(), ".knowns", "docs");
  }
+ function normalizeNewlines2(text) {
+ if (!text) return text;
+ return text.replace(/\\n/g, "\n");
+ }
  var listDocsSchema = external_exports3.object({
  tag: external_exports3.string().optional()
  });
@@ -52800,7 +52826,7 @@ async function handleCreateDoc(args) {
  if (input.tags) {
  metadata.tags = input.tags;
  }
- const initialContent = input.content || "# Content\n\nWrite your documentation here.";
+ const initialContent = normalizeNewlines2(input.content) || "# Content\n\nWrite your documentation here.";
  const fileContent = import_gray_matter3.default.stringify(initialContent, metadata);
  await writeFile4(filepath, fileContent, "utf-8");
  await notifyDocUpdate(relativePath);
@@ -52829,9 +52855,11 @@ async function handleUpdateDoc(args) {
  metadata.updatedAt = (/* @__PURE__ */ new Date()).toISOString();
  let updatedContent = content;
  let sectionUpdated;
- if (input.section && input.content) {
+ const normalizedContent = normalizeNewlines2(input.content);
+ const normalizedAppend = normalizeNewlines2(input.appendContent);
+ if (input.section && normalizedContent) {
  const sectionIndex = /^\d+$/.test(input.section) ? Number.parseInt(input.section, 10) : null;
- const result = sectionIndex !== null ? replaceSectionByIndex(content, sectionIndex, input.content) : replaceSection(content, input.section, input.content);
+ const result = sectionIndex !== null ? replaceSectionByIndex(content, sectionIndex, normalizedContent) : replaceSection(content, input.section, normalizedContent);
  if (!result) {
  return errorResponse(
  `Section not found: ${input.section}. Use 'toc: true' with get_doc to see available sections.`
@@ -52839,13 +52867,13 @@ async function handleUpdateDoc(args) {
  }
  updatedContent = result;
  sectionUpdated = input.section;
- } else if (input.content) {
- updatedContent = input.content;
+ } else if (normalizedContent) {
+ updatedContent = normalizedContent;
  }
- if (input.appendContent) {
+ if (normalizedAppend) {
  updatedContent = `${updatedContent.trimEnd()}

- ${input.appendContent}`;
+ ${normalizedAppend}`;
  }
  const newFileContent = import_gray_matter3.default.stringify(updatedContent, metadata);
  await writeFile4(resolved.filepath, newFileContent, "utf-8");
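The section handling above treats an all-digit `section` value as a numeric index and anything else as a heading name. A small sketch of that dispatch, using the same regex as the hunk (not part of the package):

```ts
// Sketch of the section-argument dispatch used in handleUpdateDoc.
function parseSectionSelector(section: string): { byIndex: number | null; byName: string } {
  const byIndex = /^\d+$/.test(section) ? Number.parseInt(section, 10) : null;
  return { byIndex, byName: section };
}

console.log(parseSectionSelector("3"));            // numeric index path
console.log(parseSectionSelector("Installation")); // heading-name path
```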
@@ -53065,6 +53093,20 @@ async function listTemplates(templatesDir) {
  }
  return templates;
  }
+ async function getTemplateConfig(templateDir) {
+ const configPath = join16(templateDir, CONFIG_FILENAME);
+ if (!existsSync12(configPath)) {
+ return null;
+ }
+ try {
+ const content = await readFile6(configPath, "utf-8");
+ const raw = (0, import_yaml.parse)(content);
+ const result = safeValidateTemplateConfig(raw);
+ return result.success ? result.data : null;
+ } catch {
+ return null;
+ }
+ }

  // src/codegen/renderer.ts
  import { readFile as readFile7 } from "node:fs/promises";
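A hedged usage sketch for the new `getTemplateConfig` helper above: it reads a template's YAML config (the `CONFIG_FILENAME` constant, presumably the `_template.yaml` described in the skill docs below) and returns `null` on a missing file, a YAML parse error, or failed validation. The directory path here is hypothetical:

```ts
// Hypothetical caller; getTemplateConfig and its validation come from the hunk above.
const config = await getTemplateConfig(".knowns/templates/api-endpoint");
if (config) {
  console.log(`template: ${config.name} (${config.description ?? "no description"})`);
} else {
  console.log("no valid template config found");
}
```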
@@ -60166,38 +60208,35 @@ async function ensureDir(dir) {
  // src/codegen/skill-parser.ts
  var import_gray_matter4 = __toESM(require_gray_matter(), 1);
 
- // src/instructions/skills/knowns.commit/SKILL.md
- var SKILL_default = '---\nname: knowns.commit\ndescription: Use when committing code changes with proper conventional commit format and verification\n---\n\n# Committing Changes\n\nCreate well-formatted commits following conventional commit standards.\n\n**Announce at start:** "I\'m using the knowns.commit skill to commit changes."\n\n**Core principle:** VERIFY BEFORE COMMITTING - check staged changes, ask for confirmation.\n\n## The Process\n\n### Step 1: Review Staged Changes\n\n```bash\ngit status\ngit diff --staged\n```\n\n### Step 2: Generate Commit Message\n\n**Format:**\n```\n<type>(<scope>): <message>\n\n- Bullet point summarizing change\n- Another point if needed\n```\n\n**Types:**\n\n| Type | Description |\n|------|-------------|\n| `feat` | New feature |\n| `fix` | Bug fix |\n| `docs` | Documentation only |\n| `style` | Formatting, no code change |\n| `refactor` | Code restructure |\n| `perf` | Performance improvement |\n| `test` | Adding tests |\n| `chore` | Maintenance |\n\n**Rules:**\n- Title lowercase, no period, max 50 chars\n- Scope optional but recommended\n- Body explains *why*, not just *what*\n\n### Step 3: Ask for Confirmation\n\nPresent message to user:\n\n```\nReady to commit:\n\nfeat(auth): add JWT token refresh\n\n- Added refresh token endpoint\n- Tokens expire after 1 hour\n\nProceed? (yes/no/edit)\n```\n\n**Wait for user approval.**\n\n### Step 4: Commit\n\n```bash\ngit commit -m "feat(auth): add JWT token refresh\n\n- Added refresh token endpoint\n- Tokens expire after 1 hour"\n```\n\n## Guidelines\n\n- Only commit staged files (don\'t `git add` unless asked)\n- NO "Co-Authored-By" lines\n- NO "Generated with Claude Code" ads\n- Ask before committing, never auto-commit\n\n## Examples\n\n**Good:**\n```\nfeat(api): add user profile endpoint\nfix(auth): handle expired token gracefully\ndocs(readme): update installation steps\n```\n\n**Bad:**\n```\nupdate code (too vague)\nWIP (not ready)\nfix bug (which bug?)\n```\n\n## Remember\n\n- Review staged changes first\n- Follow conventional format\n- Ask for confirmation\n- Keep messages concise\n';
+ // src/instructions/skills/kn:commit/SKILL.md
+ var SKILL_default = '---\nname: kn:commit\ndescription: Use when committing code changes with proper conventional commit format and verification\n---\n\n# Committing Changes\n\n**Announce:** "Using kn:commit to commit changes."\n\n**Core principle:** VERIFY BEFORE COMMITTING - check staged changes, ask for confirmation.\n\n## Step 1: Review Staged Changes\n\n```bash\ngit status\ngit diff --staged\n```\n\n## Step 2: Generate Commit Message\n\n**Format:**\n```\n<type>(<scope>): <message>\n\n- Bullet point summarizing change\n```\n\n**Types:** feat, fix, docs, style, refactor, perf, test, chore\n\n**Rules:**\n- Title lowercase, no period, max 50 chars\n- Body explains *why*, not just *what*\n\n## Step 3: Ask for Confirmation\n\n```\nReady to commit:\n\nfeat(auth): add JWT token refresh\n\n- Added refresh token endpoint\n\nProceed? (yes/no/edit)\n```\n\n**Wait for user approval.**\n\n## Step 4: Commit\n\n```bash\ngit commit -m "feat(auth): add JWT token refresh\n\n- Added refresh token endpoint"\n```\n\n## Guidelines\n\n- Only commit staged files\n- NO "Co-Authored-By" lines\n- NO "Generated with Claude Code" ads\n- Ask before committing\n\n## Checklist\n\n- [ ] Reviewed staged changes\n- [ ] Message follows convention\n- [ ] User approved\n';
 
- // src/instructions/skills/knowns.doc/SKILL.md
- var SKILL_default2 = '---\nname: knowns.doc\ndescription: Use when working with Knowns documentation - viewing, searching, creating, or updating docs\n---\n\n# Working with Documentation\n\nNavigate, create, and update Knowns project documentation.\n\n**Announce at start:** "I\'m using the knowns.doc skill to work with documentation."\n\n**Core principle:** SEARCH BEFORE CREATING - avoid duplicates.\n\n## Quick Reference\n\n{{#if mcp}}\n```json\n// List all docs\nmcp__knowns__list_docs({})\n\n// View doc (smart mode)\nmcp__knowns__get_doc({ "path": "<path>", "smart": true })\n\n// Search docs\nmcp__knowns__search_docs({ "query": "<query>" })\n\n// Create doc\nmcp__knowns__create_doc({\n "title": "<title>",\n "description": "<description>",\n "tags": ["tag1", "tag2"],\n "folder": "folder"\n})\n\n// Update doc\nmcp__knowns__update_doc({\n "path": "<path>",\n "content": "content"\n})\n\n// Update section only\nmcp__knowns__update_doc({\n "path": "<path>",\n "section": "2",\n "content": "new section content"\n})\n```\n{{else}}\n```bash\n# List all docs\nknowns doc list --plain\n\n# View doc (auto-handles large docs)\nknowns doc "<path>" --plain\n\n# Search docs\nknowns search "<query>" --type doc --plain\n\n# Create doc\nknowns doc create "<title>" -d "<description>" -t "tags" -f "folder"\n\n# Update doc\nknowns doc edit "<path>" -c "content" # Replace\nknowns doc edit "<path>" -a "content" # Append\nknowns doc edit "<path>" --section "2" -c "content" # Section only\n```\n{{/if}}\n\n## Reading Documents\n\n{{#if mcp}}\n**Use smart mode:**\n```json\nmcp__knowns__get_doc({ "path": "<path>", "smart": true })\n```\n\n- Small doc (\u22642000 tokens) \u2192 full content\n- Large doc \u2192 stats + TOC, then request specific section\n{{else}}\n**View doc:**\n```bash\nknowns doc "<path>" --plain\n```\n\nFor large docs, use sections:\n```bash\nknowns doc "<path>" --toc --plain\nknowns doc "<path>" --section "2" --plain\n```\n{{/if}}\n\n## Creating Documents\n\n### Step 1: Search First\n\n{{#if mcp}}\n```json\nmcp__knowns__search_docs({ "query": "<topic>" })\n```\n{{else}}\n```bash\nknowns search "<topic>" --type doc --plain\n```\n{{/if}}\n\n**Don\'t duplicate.** Update existing docs when possible.\n\n### Step 2: Choose Location\n\n| Doc Type | Location | Folder |\n|----------|----------|--------|\n| Core (README, ARCH) | Root | (none) |\n| Guide | `guides/` | `guides` |\n| Pattern | `patterns/` | `patterns` |\n| API doc | `api/` | `api` |\n\n### Step 3: Create\n\n{{#if mcp}}\n```json\nmcp__knowns__create_doc({\n "title": "<title>",\n "description": "<brief description>",\n "tags": ["tag1", "tag2"],\n "folder": "folder"\n})\n```\n{{else}}\n```bash\nknowns doc create "<title>" \\\n -d "<brief description>" \\\n -t "tag1,tag2" \\\n -f "folder" # optional\n```\n{{/if}}\n\n### Step 4: Add Content\n\n{{#if mcp}}\n```json\nmcp__knowns__update_doc({\n "path": "<path>",\n "content": "# Title\\n\\n## 1. Overview\\nWhat this doc covers.\\n\\n## 2. Details\\nMain content."\n})\n```\n{{else}}\n```bash\nknowns doc edit "<path>" -c "$(cat <<\'EOF\'\n# Title\n\n## 1. Overview\nWhat this doc covers.\n\n## 2. Details\nMain content.\n\n## 3. 
Examples\nPractical examples.\nEOF\n)"\n```\n{{/if}}\n\n## Updating Documents\n\n### View First\n\n{{#if mcp}}\n```json\nmcp__knowns__get_doc({ "path": "<path>", "smart": true })\nmcp__knowns__get_doc({ "path": "<path>", "toc": true })\n```\n{{else}}\n```bash\nknowns doc "<path>" --plain\nknowns doc "<path>" --toc --plain # For large docs\n```\n{{/if}}\n\n### Update Methods\n\n| Method | Use When |\n|--------|----------|\n| Replace all | Rewriting entire doc |\n| Append | Adding to end |\n| Section edit | Updating one section |\n\n**Section edit is most efficient** - less context, safer.\n\n{{#if mcp}}\n```json\n// Update just section 3\nmcp__knowns__update_doc({\n "path": "<path>",\n "section": "3",\n "content": "## 3. New Content\\n\\nUpdated section content..."\n})\n```\n{{else}}\n```bash\n# Update just section 3\nknowns doc edit "<path>" --section "3" -c "## 3. New Content\n\nUpdated section content..."\n```\n{{/if}}\n\n## Document Structure\n\nUse numbered headings for section editing to work:\n\n```markdown\n# Title (H1 - only one)\n\n## 1. Overview\n...\n\n## 2. Installation\n...\n\n## 3. Configuration\n...\n```\n\n## Remember\n\n- Search before creating (avoid duplicates)\n- Use smart mode when reading\n- Use section editing for targeted updates\n- Use numbered headings\n- Reference docs with `@doc/<path>`\n';
+ // src/instructions/skills/kn:doc/SKILL.md
+ var SKILL_default2 = '---\nname: kn:doc\ndescription: Use when working with Knowns documentation - viewing, searching, creating, or updating docs\n---\n\n# Working with Documentation\n\n**Announce:** "Using kn:doc to work with documentation."\n\n**Core principle:** SEARCH BEFORE CREATING - avoid duplicates.\n\n## Quick Reference\n\n```json\n// List docs\nmcp__knowns__list_docs({})\n\n// View doc (smart mode)\nmcp__knowns__get_doc({ "path": "<path>", "smart": true })\n\n// Search docs\nmcp__knowns__search_docs({ "query": "<query>" })\n\n// Create doc (MUST include description)\nmcp__knowns__create_doc({\n "title": "<title>",\n "description": "<brief description of what this doc covers>",\n "tags": ["tag1", "tag2"],\n "folder": "folder"\n})\n\n// Update content\nmcp__knowns__update_doc({\n "path": "<path>",\n "content": "content"\n})\n\n// Update metadata (title, description, tags)\nmcp__knowns__update_doc({\n "path": "<path>",\n "title": "New Title",\n "description": "Updated description",\n "tags": ["new", "tags"]\n})\n\n// Update section only\nmcp__knowns__update_doc({\n "path": "<path>",\n "section": "2",\n "content": "## 2. New Content\\n\\n..."\n})\n```\n\n## Creating Documents\n\n1. Search first (avoid duplicates)\n2. Choose location:\n\n| Type | Folder |\n|------|--------|\n| Core | (root) |\n| Guide | `guides` |\n| Pattern | `patterns` |\n| API | `api` |\n\n3. Create with **title + description + tags**\n4. Add content\n5. **Validate** after creating\n\n**CRITICAL:** Always include `description` - validate will fail without it!\n\n## Updating Documents\n\n**Section edit is most efficient:**\n```json\nmcp__knowns__update_doc({\n "path": "<path>",\n "section": "3",\n "content": "## 3. New Content\\n\\n..."\n})\n```\n\n## Validate After Changes\n\n**CRITICAL:** After creating/updating docs, validate:\n\n```json\nmcp__knowns__validate({ "scope": "docs" })\n```\n\nIf errors found, fix before continuing.\n\n## Mermaid Diagrams\n\nWebUI supports mermaid rendering. Use for:\n- Architecture diagrams\n- Flowcharts\n- Sequence diagrams\n- Entity relationships\n\n````markdown\n```mermaid\ngraph TD\n A[Start] --> B{Decision}\n B -->|Yes| C[Action]\n B -->|No| D[End]\n```\n````\n\nDiagrams render automatically in WebUI preview.\n\n## Checklist\n\n- [ ] Searched for existing docs\n- [ ] Created with **description** (required!)\n- [ ] Used section editing for updates\n- [ ] Used mermaid for complex flows (optional)\n- [ ] Referenced with `@doc/<path>`\n- [ ] **Validated after changes**\n';
 
- // src/instructions/skills/knowns.extract/SKILL.md
- var SKILL_default3 = '---\nname: knowns.extract\ndescription: Use when extracting reusable patterns, solutions, or knowledge into documentation\n---\n\n# Extracting Knowledge\n\nConvert implementations, patterns, or solutions into reusable project documentation.\n\n**Announce at start:** "I\'m using the knowns.extract skill to extract knowledge."\n\n**Core principle:** ONLY EXTRACT GENERALIZABLE KNOWLEDGE.\n\n## The Process\n\n### Step 1: Identify Source\n\n**From task (if ID provided):**\n{{#if mcp}}\n```json\nmcp__knowns__get_task({ "taskId": "$ARGUMENTS" })\n```\n{{else}}\n```bash\nknowns task $ARGUMENTS --plain\n```\n{{/if}}\n\n**From current context (no arguments):**\n- Recent implementation work\n- Patterns discovered during research\n- Solutions found in conversation\n\nLook for:\n- Implementation patterns used\n- Problems solved\n- Decisions made\n- Lessons learned\n\n### Step 2: Identify Extractable Knowledge\n\n**Good candidates for extraction:**\n- Reusable code patterns\n- Error handling approaches\n- Integration patterns\n- Performance solutions\n- Security practices\n- API design decisions\n\n**NOT good for extraction:**\n- Task-specific details\n- One-time fixes\n- Context-dependent solutions\n\n### Step 3: Search for Existing Docs\n\n{{#if mcp}}\n```json\n// Check if pattern already documented\nmcp__knowns__search_docs({ "query": "<pattern/topic>" })\n\n// List related docs\nmcp__knowns__list_docs({ "tag": "pattern" })\n```\n{{else}}\n```bash\n# Check if pattern already documented\nknowns search "<pattern/topic>" --type doc --plain\n\n# List related docs\nknowns doc list --tag pattern --plain\n```\n{{/if}}\n\n**Don\'t duplicate.** Update existing docs when possible.\n\n### Step 4: Create or Update Documentation\n\n**If new pattern - create doc:**\n\n{{#if mcp}}\n```json\nmcp__knowns__create_doc({\n "title": "Pattern: <Name>",\n "description": "Reusable pattern for <purpose>",\n "tags": ["pattern", "<domain>"],\n "folder": "patterns"\n})\n```\n{{else}}\n```bash\nknowns doc create "Pattern: <Name>" \\\n -d "Reusable pattern for <purpose>" \\\n -t "pattern,<domain>" \\\n -f "patterns"\n```\n{{/if}}\n\n**Add content:**\n\n{{#if mcp}}\n```json\nmcp__knowns__update_doc({\n "path": "patterns/<name>",\n "content": "# Pattern: <Name>\\n\\n## 1. Problem\\nWhat problem this pattern solves.\\n\\n## 2. Solution\\nHow to implement the pattern.\\n\\n## 3. Example\\n```typescript\\n// Code example\\n```\\n\\n## 4. When to Use\\n- Situation 1\\n\\n## 5. Source\\nDiscovered in @task-<id>"\n})\n```\n{{else}}\n```bash\nknowns doc edit "patterns/<name>" -c "$(cat <<\'EOF\'\n# Pattern: <Name>\n\n## 1. Problem\nWhat problem this pattern solves.\n\n## 2. Solution\nHow to implement the pattern.\n\n## 3. Example\n```typescript\n// Code example\n```\n\n## 4. When to Use\n- Situation 1\n- Situation 2\n\n## 5. 
Source\nDiscovered in @task-<id> (or describe context)\nEOF\n)"\n```\n{{/if}}\n\n**If updating existing doc:**\n\n{{#if mcp}}\n```json\nmcp__knowns__update_doc({\n "path": "<path>",\n "appendContent": "\\n\\n## Additional: <Topic>\\n\\n<new insight or example>"\n})\n```\n{{else}}\n```bash\nknowns doc edit "<path>" -a "\n\n## Additional: <Topic>\n\n<new insight or example>\n"\n```\n{{/if}}\n\n### Step 5: Create Template (if code-generatable)\n\nIf the pattern involves repeatable code structure, create a codegen template:\n\n```bash\n# Create template skeleton\nknowns template create <pattern-name>\n```\n\n**Update template config** (`.knowns/templates/<pattern-name>/_template.yaml`):\n\n```yaml\nname: <pattern-name>\ndescription: Generate <what it creates>\ndoc: patterns/<pattern-name> # Link to the doc you just created\n\nprompts:\n - name: name\n message: Name?\n validate: required\n\nfiles:\n - template: "{{name}}.ts.hbs"\n destination: "src/{{kebabCase name}}.ts"\n```\n\n**Create template files** (`.hbs` files with Handlebars):\n\n```handlebars\n// {{name}}.ts.hbs\nexport class {{pascalCase name}} {\n // Pattern implementation\n}\n```\n\n**Link template in doc:**\n\n{{#if mcp}}\n```json\nmcp__knowns__update_doc({\n "path": "patterns/<name>",\n "appendContent": "\\n\\n## Generate\\n\\nUse @template/<pattern-name> to generate this pattern."\n})\n```\n{{else}}\n```bash\nknowns doc edit "patterns/<name>" -a "\n\n## Generate\n\nUse @template/<pattern-name> to generate this pattern.\n"\n```\n{{/if}}\n\n### Step 6: Link Back (if from task)\n\n```bash\nknowns task edit $ARGUMENTS --append-notes "\u{1F4DA} Extracted to @doc/patterns/<name>"\nknowns task edit $ARGUMENTS --append-notes "\u{1F527} Template: @template/<pattern-name>"\n```\n\n## What to Extract\n\n| Source | Extract As | Create Template? |\n|--------|------------|------------------|\n| Code pattern | Pattern doc | \u2705 Yes |\n| Component structure | Pattern doc | \u2705 Yes |\n| API endpoint pattern | Integration guide | \u2705 Yes |\n| Error solution | Troubleshooting guide | \u274C No |\n| Performance fix | Performance patterns | \u274C Usually no |\n| Security approach | Security guidelines | \u274C No |\n\n**Create template when:**\n- Pattern is repeatable (will be used multiple times)\n- Has consistent file structure\n- Can be parameterized (name, type, etc.)\n\n## Document Templates\n\n### Pattern Template\n```markdown\n# Pattern: <Name>\n\n## Problem\nWhat this solves.\n\n## Solution\nHow to implement.\n\n## Example\nWorking code.\n\n## When to Use\nWhen to apply this pattern.\n```\n\n### Guide Template\n```markdown\n# Guide: <Topic>\n\n## Overview\nWhat this covers.\n\n## Steps\n1. Step one\n2. Step two\n\n## Common Issues\n- Issue and solution\n```\n\n## Quality Checklist\n\n- [ ] Knowledge is generalizable (not task-specific)\n- [ ] Includes working example\n- [ ] Explains when to use\n- [ ] Links back to source (if applicable)\n- [ ] Tagged appropriately\n- [ ] Template created (if code-generatable)\n- [ ] Doc links to template (`@template/...`)\n- [ ] Template links to doc (`doc:` in config)\n\n## Remember\n\n- Only extract generalizable knowledge\n- Search before creating (avoid duplicates)\n- Include practical examples\n- Reference source when available\n- Tag docs for discoverability\n- **Create template for repeatable code patterns**\n- **Link doc \u2194 template bidirectionally**\n';
+ // src/instructions/skills/kn:extract/SKILL.md
+ var SKILL_default3 = '---\nname: kn:extract\ndescription: Use when extracting reusable patterns, solutions, or knowledge into documentation\n---\n\n# Extracting Knowledge\n\n**Announce:** "Using kn:extract to extract knowledge."\n\n**Core principle:** ONLY EXTRACT GENERALIZABLE KNOWLEDGE.\n\n## Step 1: Identify Source\n\n```json\nmcp__knowns__get_task({ "taskId": "$ARGUMENTS" })\n```\n\nLook for: patterns, problems solved, decisions made, lessons learned.\n\n## Step 2: Search for Existing Docs\n\n```json\nmcp__knowns__search_docs({ "query": "<pattern/topic>" })\n```\n\n**Don\'t duplicate.** Update existing docs when possible.\n\n## Step 3: Create Documentation\n\n```json\nmcp__knowns__create_doc({\n "title": "Pattern: <Name>",\n "description": "Reusable pattern for <purpose>",\n "tags": ["pattern", "<domain>"],\n "folder": "patterns"\n})\n\nmcp__knowns__update_doc({\n "path": "patterns/<name>",\n "content": "# Pattern: <Name>\\n\\n## Problem\\n...\\n\\n## Solution\\n...\\n\\n## Example\\n```typescript\\n// Code\\n```\\n\\n## Source\\n@task-<id>"\n})\n```\n\n## Step 4: Create Template (if code-generatable)\n\n```json\nmcp__knowns__create_template({\n "name": "<pattern-name>",\n "description": "Generate <what>",\n "doc": "patterns/<pattern-name>"\n})\n```\n\nLink template in doc:\n```json\nmcp__knowns__update_doc({\n "path": "patterns/<name>",\n "appendContent": "\\n\\n## Generate\\n\\nUse @template/<pattern-name>"\n})\n```\n\n## Step 5: Validate\n\n**CRITICAL:** After creating doc/template, validate to catch broken refs:\n\n```json\nmcp__knowns__validate({})\n```\n\nIf errors found, fix before continuing.\n\n## Step 6: Link Back to Task\n\n```json\nmcp__knowns__update_task({\n "taskId": "$ARGUMENTS",\n "appendNotes": "\u{1F4DA} Extracted to @doc/patterns/<name>"\n})\n```\n\n## What to Extract\n\n| Source | Extract As | Template? |\n|--------|------------|-----------|\n| Code pattern | Pattern doc | \u2705 Yes |\n| API pattern | Integration guide | \u2705 Yes |\n| Error solution | Troubleshooting | \u274C No |\n| Security approach | Guidelines | \u274C No |\n\n## Checklist\n\n- [ ] Knowledge is generalizable\n- [ ] Includes working example\n- [ ] Links back to source\n- [ ] Template created (if applicable)\n- [ ] **Validated (no broken refs)**\n';
 
- // src/instructions/skills/knowns.init/SKILL.md
- var SKILL_default4 = '---\nname: knowns.init\ndescription: Use at the start of a new session to read project docs, understand context, and see current state\n---\n\n# Session Initialization\n\nInitialize a session by reading project documentation and understanding current state.\n\n**Announce at start:** "I\'m using the knowns.init skill to initialize this session."\n\n**Core principle:** READ DOCS BEFORE DOING ANYTHING ELSE.\n\n## The Process\n\n### Step 1: List Available Documentation\n\n{{#if mcp}}\n```json\nmcp__knowns__list_docs({})\n```\n{{else}}\n```bash\nknowns doc list --plain\n```\n{{/if}}\n\n### Step 2: Read Core Documents\n\n**Priority order:**\n\n{{#if mcp}}\n```json\n// 1. Project overview (always read)\nmcp__knowns__get_doc({ "path": "README", "smart": true })\n\n// 2. Architecture (if exists)\nmcp__knowns__get_doc({ "path": "ARCHITECTURE", "smart": true })\n\n// 3. Conventions (if exists)\nmcp__knowns__get_doc({ "path": "CONVENTIONS", "smart": true })\n```\n{{else}}\n```bash\n# 1. Project overview (always read)\nknowns doc "README" --plain\n\n# 2. Architecture (if exists)\nknowns doc "ARCHITECTURE" --plain\n\n# 3. Conventions (if exists)\nknowns doc "CONVENTIONS" --plain\n```\n{{/if}}\n\n### Step 3: Check Current State\n\n{{#if mcp}}\n```json\n// Active timer?\nmcp__knowns__get_time_report({})\n\n// Tasks in progress\nmcp__knowns__list_tasks({ "status": "in-progress" })\n\n// Board overview\nmcp__knowns__get_board({})\n```\n{{else}}\n```bash\n# Active timer?\nknowns time status\n\n# Tasks in progress\nknowns task list --status in-progress --plain\n\n# High priority todos\nknowns task list --status todo --plain | head -20\n```\n{{/if}}\n\n### Step 4: Summarize Context\n\nProvide a brief summary:\n\n```markdown\n## Session Context\n\n### Project\n- **Name**: [from config]\n- **Purpose**: [from README]\n\n### Key Docs Available\n- README: [brief note]\n- ARCHITECTURE: [if exists]\n- CONVENTIONS: [if exists]\n\n### Current State\n- Tasks in progress: [count]\n- Active timer: [yes/no]\n\n### Ready for\n- Working on tasks\n- Creating documentation\n- Answering questions about codebase\n```\n\n## Quick Commands After Init\n\n```\n# Work on a task\n/knowns.task <id>\n\n# Search for something\n{{#if mcp}}\nmcp__knowns__search_docs({ "query": "<query>" })\n{{else}}\nknowns search "<query>" --plain\n{{/if}}\n```\n\n## When to Re-Initialize\n\n**Run init again when:**\n- Starting a new session\n- Major project changes occurred\n- Switching to different area of project\n- Context feels stale\n\n## What to Learn from Docs\n\nFrom **README**:\n- Project purpose and scope\n- Key features\n- Getting started info\n\nFrom **ARCHITECTURE**:\n- System design\n- Component structure\n- Key decisions\n\nFrom **CONVENTIONS**:\n- Coding standards\n- Naming conventions\n- File organization\n\n## Remember\n\n- Always read docs first\n- Check for active work (in-progress tasks)\n- Summarize context for reference\n- Re-init when switching areas\n';
+ // src/instructions/skills/kn:implement/SKILL.md
+ var SKILL_default4 = '---\nname: kn:implement\ndescription: Use when implementing a task - follow the plan, check ACs, track progress\n---\n\n# Implementing a Task\n\nExecute the implementation plan, track progress, and complete the task.\n\n**Announce:** "Using kn:implement for task [ID]."\n\n**Core principle:** CHECK AC ONLY AFTER WORK IS DONE.\n\n## Step 1: Review Task\n\n```json\nmcp__knowns__get_task({ "taskId": "$ARGUMENTS" })\n```\n\n**If task status is "done"** (reopening):\n```json\nmcp__knowns__update_task({\n "taskId": "$ARGUMENTS",\n "status": "in-progress",\n "appendNotes": "Reopened: <reason>"\n})\nmcp__knowns__start_time({ "taskId": "$ARGUMENTS" })\n```\n\nVerify: plan exists, timer running, which ACs pending.\n\n## Step 2: Check Templates\n\n```json\nmcp__knowns__list_templates({})\n```\n\nIf template exists \u2192 use it to generate boilerplate.\n\n## Step 3: Work Through Plan\n\nFor each step:\n1. Do the work\n2. Check AC (only after done!)\n3. Append note\n\n```json\nmcp__knowns__update_task({\n "taskId": "$ARGUMENTS",\n "checkAc": [1],\n "appendNotes": "Done: brief description"\n})\n```\n\n## Step 4: Handle Scope Changes\n\n**Small:** Add AC + note\n```json\nmcp__knowns__update_task({\n "taskId": "$ARGUMENTS",\n "addAc": ["New requirement"],\n "appendNotes": "Scope: added per user"\n})\n```\n\n**Large:** Stop and ask user.\n\n## Step 5: Validate & Complete\n\n1. Run tests/lint/build\n2. **Validate** to catch broken refs:\n\n```json\nmcp__knowns__validate({})\n```\n\n3. Add implementation notes (use `appendNotes`, NOT `notes`!)\n4. Stop timer + mark done\n\n```json\nmcp__knowns__stop_time({ "taskId": "$ARGUMENTS" })\nmcp__knowns__update_task({\n "taskId": "$ARGUMENTS",\n "status": "done"\n})\n```\n\n## Step 6: Extract Knowledge (optional)\n\nIf patterns discovered: `/kn:extract $ARGUMENTS`\n\n## Checklist\n\n- [ ] All ACs checked\n- [ ] Tests pass\n- [ ] **Validated (no broken refs)**\n- [ ] Notes added\n- [ ] Timer stopped\n- [ ] Status = done\n\n## Red Flags\n\n- Checking AC before work done\n- Skipping tests\n- Skipping validation\n- Using `notes` instead of `appendNotes`\n- Marking done without verification\n';
 
- // src/instructions/skills/knowns.research/SKILL.md
- var SKILL_default5 = '---\nname: knowns.research\ndescription: Use when you need to understand existing code, find patterns, or explore the codebase before implementation\n---\n\n# Researching the Codebase\n\nUnderstand existing patterns and implementation before making changes.\n\n**Announce at start:** "I\'m using the knowns.research skill to research [topic]."\n\n**Core principle:** UNDERSTAND WHAT EXISTS BEFORE ADDING NEW CODE.\n\n## The Process\n\n### Step 1: Search Documentation\n\n{{#if mcp}}\n```json\n// Search docs for topic\nmcp__knowns__search_docs({ "query": "<topic>" })\n\n// Read relevant docs\nmcp__knowns__get_doc({ "path": "<path>", "smart": true })\n```\n{{else}}\n```bash\n# Search docs for topic\nknowns search "<topic>" --type doc --plain\n\n# Read relevant docs\nknowns doc "<path>" --plain\n```\n{{/if}}\n\n### Step 2: Search Completed Tasks\n\n{{#if mcp}}\n```json\n// Find similar work that was done\nmcp__knowns__search_tasks({ "query": "<keywords>" })\n\n// View task for implementation details\nmcp__knowns__get_task({ "taskId": "<id>" })\n```\n{{else}}\n```bash\n# Find similar work that was done\nknowns search "<keywords>" --type task --status done --plain\n\n# View task for implementation details\nknowns task <id> --plain\n```\n{{/if}}\n\n**Learn from history** - completed tasks often contain valuable insights.\n\n### Step 3: Search Codebase\n\n```bash\n# Find files by name pattern\nfind . -name "*<pattern>*" -type f | grep -v node_modules | head -20\n\n# Search code content\ngrep -r "<pattern>" --include="*.ts" --include="*.tsx" -l | head -20\n```\n\n### Step 4: Analyze Patterns\n\nLook for:\n- How similar features are implemented\n- Common patterns used\n- File/folder structure conventions\n- Naming conventions\n- Error handling patterns\n\n### Step 5: Document Findings\n\n```markdown\n## Research: [Topic]\n\n### Existing Implementations\n- `src/path/file.ts`: Does X\n- `src/path/other.ts`: Handles Y\n\n### Patterns Found\n- Pattern 1: Used for...\n- Pattern 2: Applied when...\n\n### Related Docs\n- @doc/path1 - Covers X\n- @doc/path2 - Explains Y\n\n### Recommendations\nBased on research:\n1. Reuse X from Y\n2. Follow pattern Z\n3. Avoid approach W because...\n```\n\n## Research Checklist\n\n- [ ] Searched documentation\n- [ ] Reviewed similar completed tasks\n- [ ] Found existing code patterns\n- [ ] Identified reusable components\n- [ ] Noted conventions to follow\n\n## After Research\n\nUse findings in task:\n{{#if mcp}}\n```json\n// Create informed task\nmcp__knowns__create_task({\n "title": "<title>",\n "description": "Based on research: use pattern from X"\n})\n```\n{{else}}\n```bash\n# Create informed task\nknowns task create "<title>" \\\n -d "Based on research: use pattern from X" \\\n --ac "Follow pattern in src/..." \\\n --ac "Reuse component Y"\n\n# Or update existing task plan\nknowns task edit <id> --plan $\'1. Based on research...\n2. 
Reuse pattern from...\'\n```\n{{/if}}\n\n## What to Look For\n\n| Looking For | Where to Check |\n|-------------|----------------|\n| Conventions | @doc/CONVENTIONS, existing code |\n| Patterns | @doc/patterns/*, similar features |\n| Utilities | src/utils/*, src/lib/* |\n| Examples | Completed tasks, tests |\n| API design | Existing endpoints, @doc/api/* |\n\n## When to Research\n\n**Always research before:**\n- Implementing new features\n- Adding new patterns\n- Making architectural decisions\n\n**Skip research for:**\n- Simple bug fixes with clear cause\n- Trivial changes following obvious patterns\n\n## Remember\n\n- Check docs and tasks first\n- Look at how similar things are done\n- Note file locations for reference\n- Look at tests for expected behavior\n- Document findings for future reference\n';
+ // src/instructions/skills/kn:init/SKILL.md
+ var SKILL_default5 = '---\nname: kn:init\ndescription: Use at the start of a new session to read project docs, understand context, and see current state\n---\n\n# Session Initialization\n\n**Announce:** "Using kn:init to initialize session."\n\n**Core principle:** READ DOCS BEFORE DOING ANYTHING ELSE.\n\n## Step 1: List Docs\n\n```json\nmcp__knowns__list_docs({})\n```\n\n## Step 2: Read Core Docs\n\n```json\nmcp__knowns__get_doc({ "path": "README", "smart": true })\nmcp__knowns__get_doc({ "path": "ARCHITECTURE", "smart": true })\nmcp__knowns__get_doc({ "path": "CONVENTIONS", "smart": true })\n```\n\n## Step 3: Check Current State\n\n```json\nmcp__knowns__list_tasks({ "status": "in-progress" })\nmcp__knowns__get_board({})\n```\n\n## Step 4: Summarize\n\n```markdown\n## Session Context\n- **Project**: [name]\n- **Key Docs**: README, ARCHITECTURE, CONVENTIONS\n- **In-progress tasks**: [count]\n- **Ready for**: tasks, docs, questions\n```\n\n## Next Steps\n\n```\n/kn:plan <task-id> # Plan a task\n/kn:research <query> # Research codebase\n```\n';
 
- // src/instructions/skills/knowns.task.brainstorm/SKILL.md
- var SKILL_default6 = '---\nname: knowns.task.brainstorm\ndescription: Use when requirements are unclear, multiple approaches exist, or you need to explore solutions before planning\n---\n\n# Brainstorming for Tasks\n\nConvert vague requirements into concrete design through structured questioning and exploration.\n\n**Announce at start:** "I\'m using the knowns.task.brainstorm skill to explore approaches."\n\n**Core principle:** UNDERSTAND THE PROBLEM BEFORE PROPOSING SOLUTIONS.\n\n## The Process\n\n### Phase 1: Discovery\n\n**One question at a time.** Don\'t overwhelm with multiple questions.\n\nPrefer multiple-choice when possible:\n```\nWhich approach do you prefer?\nA) Quick solution with trade-offs\nB) Comprehensive solution, more effort\nC) Something else (describe)\n```\n\nQuestions to clarify:\n- What problem are we solving?\n- Who are the users/stakeholders?\n- What are the constraints?\n- What does success look like?\n\n### Phase 2: Research Existing Patterns\n\n{{#if mcp}}\n```json\n// Search docs for related patterns\nmcp__knowns__search_docs({ "query": "<topic>" })\n\n// Check how similar things were done\nmcp__knowns__search_tasks({ "query": "<keywords>" })\n```\n{{else}}\n```bash\n# Search docs for related patterns\nknowns search "<topic>" --type doc --plain\n\n# Check how similar things were done\nknowns search "<keywords>" --type task --status done --plain\n```\n{{/if}}\n\n**Learn from history** - completed tasks often contain implementation insights.\n\n### Phase 3: Explore Approaches\n\nPresent 2-3 options with trade-offs:\n\n```markdown\n## Option A: [Name]\n- **Approach**: Brief description\n- **Pros**: What\'s good\n- **Cons**: What\'s challenging\n- **Effort**: Low/Medium/High\n\n## Option B: [Name]\n- **Approach**: Brief description\n- **Pros**: What\'s good\n- **Cons**: What\'s challenging\n- **Effort**: Low/Medium/High\n```\n\n**Lead with your recommendation** and explain why.\n\n### Phase 4: Validate and Document\n\nAfter agreement:\n- Summarize the chosen approach\n- Identify potential risks\n- Define acceptance criteria\n\nIf creating a new task:\n{{#if mcp}}\n```json\nmcp__knowns__create_task({\n "title": "<title>",\n "description": "Based on brainstorm: <key decisions>",\n "acceptanceCriteria": ["Criterion 1", "Criterion 2"]\n})\n```\n{{else}}\n```bash\nknowns task create "<title>" \\\n -d "Based on brainstorm: <key decisions>" \\\n --ac "Criterion 1" \\\n --ac "Criterion 2"\n```\n{{/if}}\n\nIf updating existing task:\n{{#if mcp}}\n```json\nmcp__knowns__update_task({\n "taskId": "$ARGUMENTS",\n "description": "Updated based on brainstorm..."\n})\n```\n{{else}}\n```bash\nknowns task edit $ARGUMENTS -d "Updated based on brainstorm..."\n```\n{{/if}}\n\n## When to Use This Skill\n\n**Good candidates:**\n- Vague requirements ("make it faster", "improve UX")\n- Multiple valid approaches exist\n- Significant effort involved\n- New territory for the project\n\n**Skip for:**\n- Clear, well-defined tasks\n- Bug fixes with obvious solutions\n- Simple additions following existing patterns\n\n## Red Flags\n\n**You\'re doing it wrong if:**\n- Proposing solutions before understanding the problem\n- Asking too many questions at once\n- Not researching existing patterns first\n- Skipping trade-off analysis\n\n## Remember\n\n- One question at a time\n- Research existing patterns first\n- Present options with trade-offs\n- Lead with your recommendation\n- Document the decision\n';
60226
+ // src/instructions/skills/kn:plan/SKILL.md
60227
+ var SKILL_default6 = '---\nname: kn:plan\ndescription: Use when creating an implementation plan for a task\n---\n\n# Planning a Task\n\n**Announce:** "Using kn:plan for task [ID]."\n\n**Core principle:** GATHER CONTEXT \u2192 PLAN \u2192 VALIDATE \u2192 WAIT FOR APPROVAL.\n\n## Mode Detection\n\nCheck if `$ARGUMENTS` contains `--from`:\n- **Yes** \u2192 Go to "Generate Tasks from Spec" section\n- **No** \u2192 Continue with normal planning flow\n\n---\n\n# Normal Planning Flow\n\n## Step 1: Take Ownership\n\n```json\nmcp__knowns__get_task({ "taskId": "$ARGUMENTS" })\nmcp__knowns__update_task({\n "taskId": "$ARGUMENTS",\n "status": "in-progress",\n "assignee": "@me"\n})\nmcp__knowns__start_time({ "taskId": "$ARGUMENTS" })\n```\n\n## Step 2: Gather Context\n\nFollow refs in task:\n```json\nmcp__knowns__get_doc({ "path": "<path>", "smart": true })\nmcp__knowns__get_task({ "taskId": "<id>" })\n```\n\nSearch related:\n```json\nmcp__knowns__search_docs({ "query": "<keywords>" })\nmcp__knowns__list_templates({})\n```\n\n## Step 3: Draft Plan\n\n```markdown\n## Implementation Plan\n1. [Step] (see @doc/relevant-doc)\n2. [Step] (use @template/xxx)\n3. Add tests\n4. Update docs\n```\n\n**Tip:** Use mermaid for complex flows:\n````markdown\n```mermaid\ngraph LR\n A[Input] --> B[Process] --> C[Output]\n```\n````\n\n## Step 4: Save Plan\n\n```json\nmcp__knowns__update_task({\n "taskId": "$ARGUMENTS",\n "plan": "1. Step one\\n2. Step two\\n3. Tests"\n})\n```\n\n## Step 5: Validate\n\n**CRITICAL:** After saving plan with refs, validate to catch broken refs:\n\n```bash\nknowns validate --plain\n```\n\nIf errors found (broken `@doc/...` or `@task-...`), fix before asking approval.\n\n## Step 6: Ask Approval\n\nPresent plan and **WAIT for explicit approval**.\n\n## Next Step\n\nAfter approval: `/kn:implement $ARGUMENTS`\n\n## Checklist\n\n- [ ] Ownership taken\n- [ ] Timer started\n- [ ] Refs followed\n- [ ] Templates checked\n- [ ] **Validated (no broken refs)**\n- [ ] User approved\n\n---\n\n# Generate Tasks from Spec\n\nWhen `$ARGUMENTS` contains `--from @doc/specs/<name>`:\n\n**Announce:** "Using kn:plan to generate tasks from spec [name]."\n\n## Step 1: Read Spec Document\n\nExtract spec path from arguments (e.g., `--from @doc/specs/user-auth` \u2192 `specs/user-auth`).\n\n```json\nmcp__knowns__get_doc({ "path": "specs/<name>", "smart": true })\n```\n\n## Step 2: Parse Requirements\n\nScan spec for:\n- **Functional Requirements** (FR-1, FR-2, etc.)\n- **Acceptance Criteria** (AC-1, AC-2, etc.)\n- **Scenarios** (for edge cases)\n\nGroup related items into logical tasks.\n\n## Step 3: Generate Task Preview\n\nFor each requirement/group, create task structure:\n\n```markdown\n## Generated Tasks from specs/<name>\n\n### Task 1: [Requirement Title]\n- **Description:** [From spec]\n- **ACs:**\n - [ ] AC from spec\n - [ ] AC from spec\n- **Spec:** specs/<name>\n- **Priority:** medium\n\n### Task 2: [Requirement Title]\n- **Description:** [From spec]\n- **ACs:**\n - [ ] AC from spec\n- **Spec:** specs/<name>\n- **Priority:** medium\n\n---\nTotal: X tasks to create\n```\n\n## Step 4: Ask for Approval\n\n> I\'ve generated **X tasks** from the spec. 
Please review:\n> - **Approve** to create all tasks\n> - **Edit** to modify before creating\n> - **Cancel** to abort\n\n**WAIT for explicit approval.**\n\n## Step 5: Create Tasks\n\nWhen approved:\n\n```json\nmcp__knowns__create_task({\n "title": "<requirement title>",\n "description": "<from spec>",\n "spec": "specs/<name>",\n "priority": "medium",\n "labels": ["from-spec"]\n})\n```\n\nThen add ACs:\n```json\nmcp__knowns__update_task({\n "taskId": "<new-id>",\n "addAc": ["AC 1", "AC 2", "AC 3"]\n})\n```\n\nRepeat for each task.\n\n## Step 6: Summary\n\n```markdown\n## Created Tasks\n\n| ID | Title | ACs |\n|----|-------|-----|\n| task-xxx | Requirement 1 | 3 |\n| task-yyy | Requirement 2 | 2 |\n\nAll tasks linked to spec: specs/<name>\n\nNext steps:\n- Start with: `/kn:plan <first-task-id>`\n- Or view all: `knowns task list --spec specs/<name> --plain`\n```\n\n## Checklist (--from mode)\n\n- [ ] Spec document read\n- [ ] Requirements parsed\n- [ ] Tasks previewed\n- [ ] User approved\n- [ ] Tasks created with spec link\n- [ ] Summary shown\n';
60186
60228
 
60187
- // src/instructions/skills/knowns.task.implement/SKILL.md
60188
- var SKILL_default7 = '---\nname: knowns.task.implement\ndescription: Use when implementing a task - follow the plan, check ACs, track progress\n---\n\n# Implementing a Task\n\nExecute the implementation plan, track progress, and complete the task.\n\n**Announce at start:** "I\'m using the knowns.task.implement skill to implement task [ID]."\n\n**Core principle:** CHECK AC ONLY AFTER WORK IS DONE.\n\n## The Process\n\n### Step 1: Review Current State\n\n{{#if mcp}}\n```json\nmcp__knowns__get_task({ "taskId": "$ARGUMENTS" })\n```\n{{else}}\n```bash\nknowns task $ARGUMENTS --plain\n```\n{{/if}}\n\nVerify:\n- Plan exists and is approved\n- Timer is running\n- Know which ACs are pending\n\n### Step 2: Check for Applicable Templates\n\nBefore writing code, check if there\'s a template that matches:\n\n```bash\nknowns template list\n```\n\n**If template exists:**\n1. Read linked doc for context\n2. Use template to generate boilerplate\n3. Customize generated code as needed\n\n{{#if mcp}}\n```json\n// Read template\'s linked doc\nmcp__knowns__get_doc({ "path": "<template-doc>", "smart": true })\n```\n```bash\n# Generate code from template (reduces context, ensures consistency)\nknowns template run <template-name> --name "MyComponent"\n```\n{{else}}\n```bash\n# Read template\'s linked doc\nknowns doc "<template-doc>" --plain\n\n# Generate code from template (reduces context, ensures consistency)\nknowns template run <template-name> --name "MyComponent"\n```\n{{/if}}\n\n**Why use templates:**\n- Reduces context (no need to generate boilerplate)\n- Ensures consistency with project patterns\n- Faster implementation\n\n### Step 3: Work Through Plan\n\nFor each step in the plan:\n\n1. **Check for template** (use if available)\n2. **Do the work** (generate or write code)\n3. **Check related AC** (only after work is done!)\n4. **Append progress note**\n\n{{#if mcp}}\n```json\n// After completing work for AC #1:\nmcp__knowns__update_task({\n "taskId": "$ARGUMENTS",\n "checkAc": [1],\n "appendNotes": "\u2713 Done: brief description"\n})\n```\n{{else}}\n```bash\n# After completing work for AC #1:\nknowns task edit $ARGUMENTS --check-ac 1\nknowns task edit $ARGUMENTS --append-notes "\u2713 Done: brief description"\n```\n{{/if}}\n\n### Step 4: Handle Scope Changes\n\nIf new requirements emerge during implementation:\n\n**Small change:**\n{{#if mcp}}\n```json\nmcp__knowns__update_task({\n "taskId": "$ARGUMENTS",\n "addAc": ["New requirement"],\n "appendNotes": "\u26A0\uFE0F Scope: added requirement per user"\n})\n```\n{{else}}\n```bash\nknowns task edit $ARGUMENTS --ac "New requirement"\nknowns task edit $ARGUMENTS --append-notes "\u26A0\uFE0F Scope: added requirement per user"\n```\n{{/if}}\n\n**Large change:**\n- Stop and ask user\n- Consider creating follow-up task\n- Update plan if needed\n\n### Step 5: Verify & Complete\n\nWhen all ACs are checked:\n\n**1. Verify code quality:**\n```bash\nnpm test # or project\'s test command\nnpm run lint # or project\'s lint command\nnpm run build # if applicable\n```\n\n**Don\'t complete if verification fails.** Fix issues first.\n\n**2. Add implementation notes (REQUIRED for audit):**\n\nDocument all changes made for audit trail.\n\n> \u26A0\uFE0F **CRITICAL**: Use `appendNotes` (NOT `notes`). 
Using `notes` will DESTROY the audit trail!\n\n{{#if mcp}}\n```json\nmcp__knowns__update_task({\n "taskId": "$ARGUMENTS",\n "appendNotes": "## Implementation Complete\\n\\n### Files Changed\\n- `src/path/file.ts` - Added X\\n- `src/path/other.ts` - Modified Y\\n\\n### Key Changes\\n- Change 1: description\\n\\n### Testing\\n- Test coverage / manual testing done"\n})\n```\n{{else}}\n```bash\nknowns task edit $ARGUMENTS --append-notes $\'\n## Implementation Complete\n\n### Files Changed\n- `src/path/file.ts` - Added X\n- `src/path/other.ts` - Modified Y\n- `tests/file.test.ts` - Added tests\n\n### Key Changes\n- Change 1: description\n- Change 2: description\n\n### Testing\n- Test coverage / manual testing done\n\'\n```\n{{/if}}\n\n**IMPORTANT:** Always use `appendNotes` (not `notes`) to preserve audit trail.\n\n**3. Stop timer and mark done:**\n\n{{#if mcp}}\n```json\nmcp__knowns__stop_time({ "taskId": "$ARGUMENTS" })\n```\n```json\nmcp__knowns__update_task({\n "taskId": "$ARGUMENTS",\n "status": "done"\n})\n```\n{{else}}\n```bash\nknowns time stop\nknowns task edit $ARGUMENTS -s done\n```\n{{/if}}\n\n### Step 6: Consider Knowledge Extraction\n\nIf generalizable patterns were discovered:\n\n```\n/knowns.extract $ARGUMENTS\n```\n\n## Progress Tracking\n\nUse concise notes:\n\n{{#if mcp}}\n```json\n// Good\nmcp__knowns__update_task({\n "taskId": "$ARGUMENTS",\n "appendNotes": "\u2713 Auth middleware implemented"\n})\n\n// Bad (too verbose)\nmcp__knowns__update_task({\n "taskId": "$ARGUMENTS",\n "appendNotes": "I have successfully completed..."\n})\n```\n{{else}}\n```bash\n# Good\nknowns task edit $ARGUMENTS --append-notes "\u2713 Auth middleware implemented"\n\n# Bad (too verbose)\nknowns task edit $ARGUMENTS --append-notes "I have successfully completed..."\n```\n{{/if}}\n\n## Completion Checklist\n\n- [ ] All ACs checked\n- [ ] Tests pass\n- [ ] Lint clean\n- [ ] Implementation notes added (with file changes for audit)\n- [ ] Timer stopped\n- [ ] Status set to `done`\n- [ ] Knowledge extracted (if applicable)\n\n## Red Flags\n\n**You\'re doing it wrong if:**\n- Checking AC before work is actually complete\n- Making changes not in the approved plan (without asking)\n- Skipping tests\n- Not tracking progress with notes\n- Marking done without verification\n\n## When to Stop\n\n**STOP and ask when:**\n- Requirements unclear or contradictory\n- Approach isn\'t working after 2-3 attempts\n- Need changes outside approved scope\n- Hit unexpected blocker\n\n## If Verification Fails\n\n**Tests failing:**\n1. Keep task in-progress\n2. Fix the issue\n3. Re-run verification\n\n**Forgot to stop timer:**\n{{#if mcp}}\n```json\nmcp__knowns__add_time({\n "taskId": "$ARGUMENTS",\n "duration": "<duration>",\n "note": "Timer correction"\n})\n```\n{{else}}\n```bash\nknowns time add $ARGUMENTS <duration> -n "Timer correction"\n```\n{{/if}}\n\n## Remember\n\n- Check AC only AFTER work is done\n- Use templates when available\n- Track progress with notes\n- Ask before scope changes\n- Follow the approved plan\n- Verify before marking done\n- Always stop the timer\n- Consider knowledge extraction\n';
60229
+ // src/instructions/skills/kn:research/SKILL.md
60230
+ var SKILL_default7 = '---\nname: kn:research\ndescription: Use when you need to understand existing code, find patterns, or explore the codebase before implementation\n---\n\n# Researching the Codebase\n\n**Announce:** "Using kn:research for [topic]."\n\n**Core principle:** UNDERSTAND WHAT EXISTS BEFORE ADDING NEW CODE.\n\n## Step 1: Search Documentation\n\n```json\nmcp__knowns__search_docs({ "query": "<topic>" })\nmcp__knowns__get_doc({ "path": "<path>", "smart": true })\n```\n\n## Step 2: Search Completed Tasks\n\n```json\nmcp__knowns__search_tasks({ "query": "<keywords>" })\nmcp__knowns__get_task({ "taskId": "<id>" })\n```\n\n## Step 3: Search Codebase\n\n```bash\nfind . -name "*<pattern>*" -type f | grep -v node_modules | head -20\ngrep -r "<pattern>" --include="*.ts" -l | head -20\n```\n\n## Step 4: Document Findings\n\n```markdown\n## Research: [Topic]\n\n### Existing Implementations\n- `src/path/file.ts`: Does X\n\n### Patterns Found\n- Pattern 1: Used for...\n\n### Related Docs\n- @doc/path1 - Covers X\n\n### Recommendations\n1. Reuse X from Y\n2. Follow pattern Z\n```\n\n## Checklist\n\n- [ ] Searched documentation\n- [ ] Reviewed similar completed tasks\n- [ ] Found existing code patterns\n- [ ] Identified reusable components\n\n## Next Step\n\nAfter research: `/kn:plan <task-id>`\n';
60189
60231
 
60190
- // src/instructions/skills/knowns.task.plan/SKILL.md
60191
- var SKILL_default8 = '---\nname: knowns.task.plan\ndescription: Use when creating an implementation plan for a task\n---\n\n# Planning a Task\n\nTake ownership, gather context, create implementation plan, and get user approval.\n\n**Announce at start:** "I\'m using the knowns.task.plan skill to plan task [ID]."\n\n**Core principle:** GATHER CONTEXT \u2192 PLAN \u2192 WAIT FOR APPROVAL.\n\n## The Process\n\n### Step 1: View Task & Take Ownership\n\n{{#if mcp}}\n```json\nmcp__knowns__get_task({ "taskId": "$ARGUMENTS" })\n```\n\n```json\nmcp__knowns__update_task({\n "taskId": "$ARGUMENTS",\n "status": "in-progress",\n "assignee": "@me"\n})\n```\n\n```json\nmcp__knowns__start_time({ "taskId": "$ARGUMENTS" })\n```\n{{else}}\n```bash\nknowns task $ARGUMENTS --plain\nknowns task edit $ARGUMENTS -s in-progress -a @me\nknowns time start $ARGUMENTS\n```\n{{/if}}\n\n**Timer is mandatory.** Time data is used for estimation.\n\n### Step 2: Gather Context\n\n**Follow all refs in task:**\n\n{{#if mcp}}\n```json\n// @doc/<path> \u2192\nmcp__knowns__get_doc({ "path": "<path>", "smart": true })\n\n// @task-<id> \u2192\nmcp__knowns__get_task({ "taskId": "<id>" })\n```\n{{else}}\n```bash\n# @doc/<path> \u2192\nknowns doc "<path>" --plain\n\n# @task-<id> \u2192\nknowns task <id> --plain\n```\n{{/if}}\n\n**Search for related context:**\n\n{{#if mcp}}\n```json\nmcp__knowns__search_docs({ "query": "<keywords>" })\nmcp__knowns__search_tasks({ "query": "<keywords>" })\n```\n{{else}}\n```bash\nknowns search "<keywords>" --type doc --plain\nknowns search "<keywords>" --type task --status done --plain\n```\n{{/if}}\n\n**Check for templates:**\n\n```bash\nknowns template list\n```\n\n### Step 3: Draft Implementation Plan\n\nStructure your plan:\n\n```markdown\n## Implementation Plan\n\n1. [Step] (see @doc/relevant-doc)\n2. [Step] (use @template/xxx if available)\n3. Add tests\n4. Update documentation\n```\n\n**Plan guidelines:**\n- Reference relevant docs with `@doc/<path>`\n- Reference templates with `@template/<name>`\n- Include testing step\n- Include doc updates if needed\n- Keep steps actionable and specific\n\n### Step 4: Present to User\n\nShow the plan and **ASK for approval**:\n\n```markdown\nHere\'s my implementation plan for task [ID]:\n\n1. Step one (see @doc/xxx)\n2. Generate boilerplate with @template/xxx\n3. Customize implementation\n4. Add unit tests\n5. Update API docs\n\nShall I proceed with this plan?\n```\n\n**WAIT for explicit approval.**\n\n### Step 5: Save Plan (after approval)\n\n> \u26A0\uFE0F **Use `appendNotes` (NOT `notes`)** to preserve audit trail:\n\n{{#if mcp}}\n```json\nmcp__knowns__update_task({\n "taskId": "$ARGUMENTS",\n "plan": "1. Step one (see @doc/xxx)\\n2. Step two\\n3. Add unit tests\\n4. Update API docs",\n "appendNotes": "\u{1F4CB} Plan approved, starting implementation"\n})\n```\n{{else}}\n```bash\nknowns task edit $ARGUMENTS --plan $\'1. Step one (see @doc/xxx)\n2. Step two\n3. Add unit tests\n4. 
Update API docs\'\nknowns task edit $ARGUMENTS --append-notes "\u{1F4CB} Plan approved, starting implementation"\n```\n{{/if}}\n\n## Plan Quality Checklist\n\n- [ ] Task ownership taken (status: in-progress)\n- [ ] Timer started\n- [ ] All refs followed\n- [ ] Related docs/tasks searched\n- [ ] Templates identified (if any)\n- [ ] Steps are specific and actionable\n- [ ] Includes relevant doc/template references\n- [ ] Includes testing\n- [ ] User has approved\n\n## Next Step\n\nAfter plan is approved:\n\n```\n/knowns.task.implement $ARGUMENTS\n```\n\n## When Plan Isn\'t Clear\n\nIf requirements are unclear or multiple approaches exist:\n\n```\n/knowns.task.brainstorm $ARGUMENTS\n```\n\n## Remember\n\n- Take ownership and start timer first\n- Gather context before planning\n- Check for templates to use\n- Never implement without approved plan\n- Reference docs and templates in the plan\n';
60232
+ // src/instructions/skills/kn:spec/SKILL.md
60233
+ var SKILL_default8 = '---\nname: kn:spec\ndescription: Use when creating a specification document for a feature (SDD workflow)\n---\n\n# Creating a Spec Document\n\nCreate a specification document for a feature using SDD (Spec-Driven Development).\n\n**Announce:** "Using kn:spec to create spec for [name]."\n\n**Core principle:** SPEC FIRST \u2192 REVIEW \u2192 APPROVE \u2192 THEN PLAN TASKS.\n\n## Step 1: Get Feature Name\n\nIf `$ARGUMENTS` provided, use it as spec name.\n\nIf no arguments, ask user:\n> What feature are you speccing? (e.g., "user-auth", "payment-flow")\n\n## Step 2: Gather Requirements\n\nAsk user to describe the feature:\n> Please describe the feature requirements. What should it do?\n\nListen for:\n- Core functionality\n- User stories / scenarios\n- Edge cases\n- Non-functional requirements\n\n## Step 3: Create Spec Document\n\n```json\nmcp__knowns__create_doc({\n "title": "<Feature Name>",\n "description": "Specification for <feature>",\n "folder": "specs",\n "tags": ["spec", "draft"],\n "content": "<spec content>"\n})\n```\n\n**Spec Template:**\n\n```markdown\n## Overview\n\nBrief description of the feature and its purpose.\n\n## Requirements\n\n### Functional Requirements\n- FR-1: [Requirement description]\n- FR-2: [Requirement description]\n\n### Non-Functional Requirements\n- NFR-1: [Performance, security, etc.]\n\n## Acceptance Criteria\n\n- [ ] AC-1: [Testable criterion]\n- [ ] AC-2: [Testable criterion]\n- [ ] AC-3: [Testable criterion]\n\n## Scenarios\n\n### Scenario 1: [Happy Path]\n**Given** [context]\n**When** [action]\n**Then** [expected result]\n\n### Scenario 2: [Edge Case]\n**Given** [context]\n**When** [action]\n**Then** [expected result]\n\n## Technical Notes\n\nOptional implementation hints or constraints.\n\n## Open Questions\n\n- [ ] Question 1?\n- [ ] Question 2?\n```\n\n## Step 4: Ask for Review\n\nPresent the spec and ask:\n> Please review this spec:\n> - **Approve** if requirements are complete\n> - **Edit** if you want to modify something\n> - **Add more** if requirements are missing\n\n## Step 5: Handle Response\n\n**If approved:**\n```json\nmcp__knowns__update_doc({\n "path": "specs/<name>",\n "tags": ["spec", "approved"]\n})\n```\n\nThen suggest:\n> Spec approved! Ready to create tasks?\n> Run: `/kn:plan --from @doc/specs/<name>`\n\n**If edit requested:**\nUpdate the spec based on feedback and return to Step 4.\n\n**If add more:**\nGather additional requirements and update spec.\n\n## Checklist\n\n- [ ] Feature name determined\n- [ ] Requirements gathered\n- [ ] Spec created in specs/ folder\n- [ ] Includes: Overview, Requirements, ACs, Scenarios\n- [ ] User reviewed\n- [ ] Status updated (draft \u2192 approved)\n- [ ] Next step suggested (/kn:plan --from)\n\n## Red Flags\n\n- Creating spec without user input\n- Skipping review step\n- Approving without explicit user confirmation\n- Not suggesting task creation after approval\n';
60192
60234
 
60193
- // src/instructions/skills/knowns.task.reopen/SKILL.md
60194
- var SKILL_default9 = '---\nname: knowns.task.reopen\ndescription: Use when reopening a completed task to add new requirements, fix issues, or extend functionality\n---\n\n# Reopening Tasks\n\nReopen completed tasks properly with time tracking and requirement documentation.\n\n**Announce at start:** "I\'m using the knowns.task.reopen skill to reopen task [ID]."\n\n**Core principle:** DOCUMENT WHY THE TASK IS REOPENED.\n\n## The Process\n\n### Step 1: View Current Task State\n\n{{#if mcp}}\n```json\nmcp__knowns__get_task({ "taskId": "$ARGUMENTS" })\n```\n{{else}}\n```bash\nknowns task $ARGUMENTS --plain\n```\n{{/if}}\n\nVerify:\n- Task is currently `done`\n- Understand what was implemented\n- Review implementation notes\n\n### Step 2: Reopen and Start Timer\n\n{{#if mcp}}\n```json\n// Set back to in-progress\nmcp__knowns__update_task({\n "taskId": "$ARGUMENTS",\n "status": "in-progress"\n})\n\n// Start timer (REQUIRED)\nmcp__knowns__start_time({ "taskId": "$ARGUMENTS" })\n```\n{{else}}\n```bash\n# Set back to in-progress\nknowns task edit $ARGUMENTS -s in-progress\n\n# Start timer (REQUIRED)\nknowns time start $ARGUMENTS\n```\n{{/if}}\n\n### Step 3: Document Reopen Reason\n\n> \u26A0\uFE0F **Use `appendNotes` (NOT `notes`)** to preserve existing audit trail:\n\n{{#if mcp}}\n```json\nmcp__knowns__update_task({\n "taskId": "$ARGUMENTS",\n "appendNotes": "\u{1F504} Reopened: <reason>"\n})\n```\n{{else}}\n```bash\nknowns task edit $ARGUMENTS --append-notes "\u{1F504} Reopened: <reason>"\n```\n{{/if}}\n\n**Common reasons:**\n- User requested changes\n- Bug found in implementation\n- New requirements added\n- Missed acceptance criteria\n\n### Step 4: Add New Requirements\n\n{{#if mcp}}\n```json\nmcp__knowns__update_task({\n "taskId": "$ARGUMENTS",\n "addAc": ["New requirement 1", "Fix: issue description"]\n})\n```\n{{else}}\n```bash\n# Add new acceptance criteria\nknowns task edit $ARGUMENTS --ac "New requirement 1"\nknowns task edit $ARGUMENTS --ac "Fix: issue description"\n```\n{{/if}}\n\n### Step 5: Update Plan (if needed)\n\n{{#if mcp}}\n```json\nmcp__knowns__update_task({\n "taskId": "$ARGUMENTS",\n "plan": "Previous plan + new steps:\\n1. Original step (done)\\n2. Original step (done)\\n3. NEW: Address new requirement\\n4. NEW: Fix reported issue"\n})\n```\n{{else}}\n```bash\nknowns task edit $ARGUMENTS --plan $\'Previous plan + new steps:\n1. Original step (done)\n2. Original step (done)\n3. NEW: Address new requirement\n4. 
NEW: Fix reported issue\'\n```\n{{/if}}\n\n**Present updated plan and WAIT for approval.**\n\n### Step 6: Implement and Complete\n\nFollow normal task completion flow:\n\n{{#if mcp}}\n```json\n// Check new ACs as completed\nmcp__knowns__update_task({\n "taskId": "$ARGUMENTS",\n "checkAc": [<new-index>],\n "appendNotes": "\u2713 Done: new requirement"\n})\n\n// Stop timer\nmcp__knowns__stop_time({ "taskId": "$ARGUMENTS" })\n\n// Mark done again\nmcp__knowns__update_task({\n "taskId": "$ARGUMENTS",\n "status": "done"\n})\n```\n{{else}}\n```bash\n# Check new ACs as completed\nknowns task edit $ARGUMENTS --check-ac <new-index>\nknowns task edit $ARGUMENTS --append-notes "\u2713 Done: new requirement"\n\n# Stop timer\nknowns time stop\n\n# Mark done again\nknowns task edit $ARGUMENTS -s done\n```\n{{/if}}\n\n## When to Reopen vs Create New Task\n\n| Reopen Existing | Create New Task |\n|-----------------|-----------------|\n| Small fix/change | Major new feature |\n| Related to original work | Unrelated work |\n| Same context needed | Different context |\n| Quick addition | Significant scope |\n\n**Rule of thumb:** If it takes < 30 mins and relates to original task, reopen. Otherwise, create new task with reference.\n\n## Creating Follow-up Task Instead\n\n{{#if mcp}}\n```json\nmcp__knowns__create_task({\n "title": "Follow-up: <description>",\n "description": "Related to @task-$ARGUMENTS"\n})\n```\n{{else}}\n```bash\nknowns task create "Follow-up: <description>" \\\n -d "Related to @task-$ARGUMENTS" \\\n --ac "New requirement"\n```\n{{/if}}\n\n## Remember\n\n- Always document reopen reason\n- Start timer when reopening\n- Add new AC for traceability\n- Stop timer when done\n- Consider if new task is more appropriate\n';
60235
+ // src/instructions/skills/kn:template/SKILL.md
60236
+ var SKILL_default9 = '---\nname: kn:template\ndescription: Use when generating code from templates - list, run, or create templates\n---\n\n# Working with Templates\n\n**Announce:** "Using kn:template to work with templates."\n\n**Core principle:** USE TEMPLATES FOR CONSISTENT CODE GENERATION.\n\n## Step 1: List Templates\n\n```json\nmcp__knowns__list_templates({})\n```\n\n## Step 2: Get Template Details\n\n```json\nmcp__knowns__get_template({ "name": "<template-name>" })\n```\n\nCheck: prompts, `doc:` link, files to generate.\n\n## Step 3: Read Linked Documentation\n\n```json\nmcp__knowns__get_doc({ "path": "<doc-path>", "smart": true })\n```\n\n## Step 4: Run Template\n\n```json\n// Dry run first\nmcp__knowns__run_template({\n "name": "<template-name>",\n "variables": { "name": "MyComponent" },\n "dryRun": true\n})\n\n// Then run for real\nmcp__knowns__run_template({\n "name": "<template-name>",\n "variables": { "name": "MyComponent" },\n "dryRun": false\n})\n```\n\n## Step 5: Create New Template\n\n```json\nmcp__knowns__create_template({\n "name": "<template-name>",\n "description": "Description",\n "doc": "patterns/<related-doc>"\n})\n```\n\n## Template Config\n\n```yaml\nname: react-component\ndescription: Create a React component\ndoc: patterns/react-component\n\nprompts:\n - name: name\n message: Component name?\n validate: required\n\nfiles:\n - template: ".tsx.hbs"\n destination: "src/components//.tsx"\n```\n\n## CRITICAL: Syntax Pitfalls\n\n**NEVER write `$` + triple-brace:**\n```\n// \u274C WRONG\n$` + `{` + `{` + `{camelCase name}`\n\n// \u2705 CORRECT - add space, use ~\n${ {{~camelCase name~}}}\n```\n\n## Checklist\n\n- [ ] Listed available templates\n- [ ] Read linked documentation\n- [ ] Ran dry run first\n- [ ] Verified generated files\n';
60195
60237
 
60196
- // src/instructions/skills/knowns.task/SKILL.md
60197
- var SKILL_default10 = '---\nname: knowns.task\ndescription: Use when working on a Knowns task - view task details and decide next action\n---\n\n# Working on a Task\n\nView task details and determine the appropriate next action.\n\n**Announce at start:** "I\'m using the knowns.task skill to view task [ID]."\n\n**Core principle:** VIEW AND ROUTE - analyze state, suggest next skill.\n\n## The Process\n\n### Step 1: View Task\n\n{{#if mcp}}\n```json\nmcp__knowns__get_task({ "taskId": "$ARGUMENTS" })\n```\n{{else}}\n```bash\nknowns task $ARGUMENTS --plain\n```\n{{/if}}\n\n### Step 2: Analyze State\n\nCheck:\n- **Status**: todo, in-progress, done?\n- **Assignee**: Assigned to someone?\n- **AC**: Any checked? All checked?\n- **Plan**: Has implementation plan?\n- **Refs**: Any `@doc/` or `@task-` references?\n\n### Step 3: Suggest Next Action\n\nBased on task state, recommend the appropriate skill:\n\n| State | Next Skill |\n|-------|------------|\n| `todo`, not started | `knowns.task.plan` |\n| `in-progress`, no plan | `knowns.task.plan` |\n| `in-progress`, has plan | `knowns.task.implement` |\n| `done`, needs changes | `knowns.task.reopen` |\n| Requirements unclear | `knowns.task.brainstorm` |\n\n### Step 4: Follow Refs (if needed)\n\nIf task has references, follow them for context:\n\n{{#if mcp}}\n```json\n// Doc ref: @doc/path \u2192\nmcp__knowns__get_doc({ "path": "<path>", "smart": true })\n\n// Task ref: @task-<id> \u2192\nmcp__knowns__get_task({ "taskId": "<id>" })\n```\n{{else}}\n```bash\n# Doc ref: @doc/path \u2192\nknowns doc "<path>" --plain\n\n# Task ref: @task-<id> \u2192\nknowns task <id> --plain\n```\n{{/if}}\n\n## Quick Actions\n\n**Start planning (includes taking ownership):**\n```\n/knowns.task.plan $ARGUMENTS\n```\n\n**Continue implementing:**\n```\n/knowns.task.implement $ARGUMENTS\n```\n\n**Requirements unclear:**\n```\n/knowns.task.brainstorm $ARGUMENTS\n```\n\n**Reopen completed task:**\n```\n/knowns.task.reopen $ARGUMENTS\n```\n\n## Remember\n\n- This skill is for viewing and routing\n- Use `plan` to start a new task (takes ownership, starts timer)\n- Use `implement` to continue/complete in-progress tasks\n- Always follow refs for full context\n';
60198
-
60199
- // src/instructions/skills/knowns.template/SKILL.md
60200
- var SKILL_default11 = '---\nname: knowns.template\ndescription: Use when generating code from templates - list, run, or create templates\n---\n\n# Working with Templates\n\nGenerate code from predefined templates stored in `.knowns/templates/`.\n\n**Announce at start:** "I\'m using the knowns.template skill to work with templates."\n\n**Core principle:** USE TEMPLATES FOR CONSISTENT CODE GENERATION.\n\n## The Process\n\n### Step 1: List Available Templates\n\n{{#if mcp}}\n```json\nmcp__knowns__list_templates({})\n```\n{{else}}\n```bash\nknowns template list\n```\n{{/if}}\n\n### Step 2: Get Template Details\n\n{{#if mcp}}\n```json\nmcp__knowns__get_template({ "name": "<template-name>" })\n```\n{{else}}\n```bash\nknowns template info <template-name>\n```\n{{/if}}\n\nCheck:\n- Required variables (prompts)\n- Linked documentation (`doc:`)\n- Files that will be generated\n\n### Step 3: Read Linked Documentation\n\nIf template has a `doc:` field, read it first:\n\n{{#if mcp}}\n```json\nmcp__knowns__get_doc({ "path": "<doc-path>", "smart": true })\n```\n{{else}}\n```bash\nknowns doc "<doc-path>" --plain\n```\n{{/if}}\n\n### Step 4: Run Template\n\n{{#if mcp}}\n```json\n// Dry run first (preview)\nmcp__knowns__run_template({\n "name": "<template-name>",\n "variables": { "name": "MyComponent", "type": "page" },\n "dryRun": true\n})\n\n// Then run for real\nmcp__knowns__run_template({\n "name": "<template-name>",\n "variables": { "name": "MyComponent", "type": "page" },\n "dryRun": false\n})\n```\n{{else}}\n```bash\n# Dry run (preview)\nknowns template run <template-name> --name "MyComponent" --dry-run\n\n# Run for real\nknowns template run <template-name> --name "MyComponent"\n```\n{{/if}}\n\n### Step 5: Create New Template\n\n{{#if mcp}}\n```json\nmcp__knowns__create_template({\n "name": "<template-name>",\n "description": "Template description",\n "doc": "patterns/<related-doc>" // Optional: link to documentation\n})\n```\n{{else}}\n```bash\nknowns template create <template-name>\n```\n{{/if}}\n\nThis creates:\n```\n.knowns/templates/<template-name>/\n \u251C\u2500\u2500 _template.yaml # Config\n \u2514\u2500\u2500 example.ts.hbs # Example file\n```\n\n## Template Config (`_template.yaml`)\n\n```yaml\nname: react-component\ndescription: Create a React component with tests\ndoc: patterns/react-component # Link to documentation\n\nprompts:\n - name: name\n message: Component name?\n validate: required\n\n - name: type\n message: Component type?\n type: select\n choices:\n - page\n - component\n - layout\n\nfiles:\n - template: "{{name}}.tsx.hbs"\n destination: "src/components/{{pascalCase name}}/{{pascalCase name}}.tsx"\n\n - template: "{{name}}.test.tsx.hbs"\n destination: "src/components/{{pascalCase name}}/{{pascalCase name}}.test.tsx"\n condition: "{{includeTests}}"\n```\n\n## Template-Doc Linking\n\nTemplates can reference docs and vice versa:\n\n**In `_template.yaml`:**\n```yaml\ndoc: patterns/react-component\n```\n\n**In doc (markdown):**\n```markdown\nUse @template/react-component to generate.\n```\n\n**AI workflow:**\n1. Get template config\n2. Follow `doc:` link to understand patterns\n3. 
Run template with appropriate variables\n\n## Handlebars Helpers\n\nTemplates use Handlebars with built-in helpers:\n\n| Helper | Example | Output |\n|--------|---------|--------|\n| `camelCase` | `{{camelCase "my name"}}` | `myName` |\n| `pascalCase` | `{{pascalCase "my name"}}` | `MyName` |\n| `kebabCase` | `{{kebabCase "MyName"}}` | `my-name` |\n| `snakeCase` | `{{snakeCase "MyName"}}` | `my_name` |\n| `upperCase` | `{{upperCase "name"}}` | `NAME` |\n| `lowerCase` | `{{lowerCase "NAME"}}` | `name` |\n\n## CRITICAL: Template Syntax Pitfalls\n\n### JavaScript Template Literals + Handlebars\n\n**NEVER write `$` followed by triple-brace** - Handlebars interprets triple-brace as unescaped output:\n\n```\n// \u274C WRONG - Parse error!\nthis.logger.log(`Created: $` + `\\{{\\{camelCase entity}.id}`);\n\n// \u2705 CORRECT - Add space, use ~ to trim whitespace\nthis.logger.log(`Created: ${ \\{{~camelCase entity~}}.id}`);\n// Output: this.logger.log(`Created: ${product.id}`);\n```\n\n**Rules when writing .hbs templates:**\n1. Never `$` + triple-brace - always add space: `${ \\{{`\n2. Use `~` (tilde) to trim whitespace: `\\{{~helper~}}`\n3. For literal braces, escape with backslash\n\n## When to Use Templates\n\n| Scenario | Action |\n|----------|--------|\n| Creating new component | Run `react-component` template |\n| Adding API endpoint | Run `api-endpoint` template |\n| Setting up new feature | Run `feature-module` template |\n| Consistent file structure | Use template instead of copy-paste |\n\n## Integrated Workflows\n\n### During Implementation (Use Template)\n\n```\nTask \u2192 Read Context \u2192 Find Template \u2192 Generate Code \u2192 Customize\n```\n\n1. Read task and understand requirements\n2. List templates to find applicable one\n3. Get template details and read linked doc\n4. Run template (dry run first, then real)\n5. Customize generated code as needed\n6. Continue with remaining implementation\n\n**Benefits:**\n- Reduces context (no need to generate boilerplate)\n- Ensures consistency with project patterns\n- Faster implementation\n\n### During Extract (Create Template)\n\n```\nContext \u2192 Identify Pattern \u2192 Create Doc \u2192 Create Template \u2192 Link Both\n```\n\n1. Identify repeatable code pattern\n2. Create doc with `/knowns.extract`\n3. Create template with `knowns template create <name>`\n4. Link template to doc: `doc: patterns/<name>`\n5. Link doc to template: `@template/<name>`\n\n**When to create template:**\n- Pattern will be used multiple times\n- Has consistent file structure\n- Can be parameterized\n\n## Checklist\n\n- [ ] Listed available templates\n- [ ] Got template details (prompts, files)\n- [ ] Read linked documentation (if any)\n- [ ] Understood required variables\n- [ ] Ran dry run first\n- [ ] Ran template with correct inputs\n- [ ] Verified generated files\n\n## Remember\n\n- Always dry run first before writing files\n- Check `doc:` link in template for context\n- Templates ensure consistent code structure\n- Create new templates for repeated patterns\n- **NEVER write `$` + triple-brace** - use `${ \\{{~helper~}}` instead (add space, use tilde)\n';
60238
+ // src/instructions/skills/kn:verify/SKILL.md
60239
+ var SKILL_default10 = '---\nname: kn:verify\ndescription: Use when running SDD verification and coverage reporting\n---\n\n# SDD Verification\n\nRun validation with SDD-awareness to check spec coverage and task status.\n\n**Announce:** "Using kn:verify to check SDD status."\n\n**Core principle:** VERIFY SPEC COVERAGE \u2192 REPORT WARNINGS \u2192 SUGGEST FIXES.\n\n## Step 1: Run SDD Validation\n\n### Via CLI\n```bash\nknowns validate --sdd --plain\n```\n\n### Via MCP (if available)\n```json\nmcp__knowns__validate({ "scope": "sdd" })\n```\n\n## Step 2: Present SDD Status Report\n\nDisplay the results in this format:\n\n```\nSDD Status Report\n\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\nSpecs: X total | Y approved | Z draft\nTasks: X total | Y done | Z in-progress | W todo\nCoverage: X/Y tasks linked to specs (Z%)\n\n\u26A0\uFE0F Warnings:\n - task-XX has no spec reference\n - specs/feature: X/Y ACs incomplete\n\n\u2705 Passed:\n - All spec references resolve\n - specs/auth: fully implemented\n```\n\n## Step 3: Analyze Results\n\n**Good coverage (>80%):**\n> SDD coverage is healthy. All tasks are properly linked to specs.\n\n**Medium coverage (50-80%):**\n> Some tasks are missing spec references. Consider:\n> - Link existing tasks to specs: `knowns task edit <id> --spec specs/<name>`\n> - Create specs for unlinked work: `/kn:spec <feature-name>`\n\n**Low coverage (<50%):**\n> Many tasks lack spec references. For better traceability:\n> 1. Create specs for major features: `/kn:spec <feature>`\n> 2. Link tasks to specs: `knowns task edit <id> --spec specs/<name>`\n> 3. Use `/kn:plan --from @doc/specs/<name>` for new tasks\n\n## Step 4: Suggest Actions\n\nBased on warnings, suggest specific fixes:\n\n**For tasks without spec:**\n> Link task to spec:\n> ```bash\n> knowns task edit <id> --spec specs/<name>\n> ```\n\n**For incomplete ACs:**\n> Check task progress:\n> ```bash\n> knowns task <id> --plain\n> ```\n\n**For approved specs without tasks:**\n> Create tasks from spec:\n> ```\n> /kn:plan --from @doc/specs/<name>\n> ```\n\n## Checklist\n\n- [ ] Ran validate --sdd\n- [ ] Presented status report\n- [ ] Analyzed coverage level\n- [ ] Suggested specific fixes for warnings\n\n## Red Flags\n\n- Ignoring warnings\n- Not suggesting actionable fixes\n- Skipping coverage analysis\n';
60201
60240
 
60202
60241
  // src/instructions/skills/index.ts
60203
60242
  function parseSkillFrontmatter(content) {
@@ -60224,17 +60263,16 @@ function createSkill(content, folderName) {
60224
60263
  content: content.trim()
60225
60264
  };
60226
60265
  }
60227
- var SKILL_TASK = createSkill(SKILL_default10, "knowns.task");
60228
- var SKILL_TASK_PLAN = createSkill(SKILL_default8, "knowns.task.plan");
60229
- var SKILL_TASK_IMPLEMENT = createSkill(SKILL_default7, "knowns.task.implement");
60230
- var SKILL_TASK_BRAINSTORM = createSkill(SKILL_default6, "knowns.task.brainstorm");
60231
- var SKILL_TASK_REOPEN = createSkill(SKILL_default9, "knowns.task.reopen");
60232
- var SKILL_EXTRACT = createSkill(SKILL_default3, "knowns.extract");
60233
- var SKILL_DOC = createSkill(SKILL_default2, "knowns.doc");
60234
- var SKILL_COMMIT = createSkill(SKILL_default, "knowns.commit");
60235
- var SKILL_INIT = createSkill(SKILL_default4, "knowns.init");
60236
- var SKILL_RESEARCH = createSkill(SKILL_default5, "knowns.research");
60237
- var SKILL_TEMPLATE = createSkill(SKILL_default11, "knowns.template");
60266
+ var SKILL_INIT = createSkill(SKILL_default5, "kn:init");
60267
+ var SKILL_PLAN = createSkill(SKILL_default6, "kn:plan");
60268
+ var SKILL_IMPLEMENT = createSkill(SKILL_default4, "kn:implement");
60269
+ var SKILL_RESEARCH = createSkill(SKILL_default7, "kn:research");
60270
+ var SKILL_COMMIT = createSkill(SKILL_default, "kn:commit");
60271
+ var SKILL_EXTRACT = createSkill(SKILL_default3, "kn:extract");
60272
+ var SKILL_DOC = createSkill(SKILL_default2, "kn:doc");
60273
+ var SKILL_TEMPLATE = createSkill(SKILL_default9, "kn:template");
60274
+ var SKILL_SPEC = createSkill(SKILL_default8, "kn:spec");
60275
+ var SKILL_VERIFY = createSkill(SKILL_default10, "kn:verify");
60238
60276
 
60239
60277
  // src/mcp/handlers/template.ts
60240
60278
  function getTemplatesDir() {
@@ -60766,6 +60804,657 @@ async function handleSearch(args, fileStore) {
60766
60804
  });
60767
60805
  }
60768
60806
 
60807
+ // src/mcp/handlers/validate.ts
60808
+ import { existsSync as existsSync16 } from "node:fs";
60809
+ import { readFile as readFile10, readdir as readdir9, writeFile as writeFile7 } from "node:fs/promises";
60810
+ import { join as join20 } from "node:path";
60811
+ var import_gray_matter6 = __toESM(require_gray_matter(), 1);
60812
+ var import_handlebars2 = __toESM(require_lib(), 1);
60813
+ init_config();
60814
+ var validateTools = [
60815
+ {
60816
+ name: "validate",
60817
+ description: "Validate tasks, docs, and templates for reference integrity and quality. Returns errors, warnings, and info about broken refs, missing AC, orphan docs, etc. Use scope='sdd' for SDD (Spec-Driven Development) validation.",
60818
+ inputSchema: {
60819
+ type: "object",
60820
+ properties: {
60821
+ scope: {
60822
+ type: "string",
60823
+ enum: ["all", "tasks", "docs", "templates", "sdd"],
60824
+ description: "Validation scope: 'all' (default), 'tasks', 'docs', 'templates', or 'sdd' for spec-driven checks"
60825
+ },
60826
+ strict: {
60827
+ type: "boolean",
60828
+ description: "Treat warnings as errors (default: false)"
60829
+ },
60830
+ fix: {
60831
+ type: "boolean",
60832
+ description: "Auto-fix supported issues like broken doc refs (default: false)"
60833
+ }
60834
+ }
60835
+ }
60836
+ }
60837
+ ];
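For orientation, a client request against this tool might look like the following sketch (the tool and argument names come from the `inputSchema` above; the values are only examples):

```js
// Illustrative MCP call shaped by the inputSchema above (values are examples only)
const exampleRequest = {
  name: "validate",
  arguments: {
    scope: "sdd",   // "all" | "tasks" | "docs" | "templates" | "sdd"
    strict: false,  // treat warnings as errors
    fix: false      // auto-fix supported issues such as broken doc refs
  }
};
```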
60838
+ function stripTrailingPunctuation(path2) {
60839
+ return path2.replace(/[.,;:!?`'")\]]+$/, "");
60840
+ }
60841
+ function extractRefs(content) {
60842
+ const docRefs = [];
60843
+ const taskRefs = [];
60844
+ const templateRefs = [];
60845
+ const docRefPattern = /@docs?\/([^\s,;:!?"'()\]]+)/g;
60846
+ for (const match2 of content.matchAll(docRefPattern)) {
60847
+ let docPath = stripTrailingPunctuation(match2[1] || "");
60848
+ docPath = docPath.replace(/\.md$/, "");
60849
+ if (docPath && !docRefs.includes(docPath)) {
60850
+ docRefs.push(docPath);
60851
+ }
60852
+ }
60853
+ const taskRefPattern = /@task-([a-zA-Z0-9]+)/g;
60854
+ for (const match2 of content.matchAll(taskRefPattern)) {
60855
+ const taskId = match2[1] || "";
60856
+ if (taskId && !taskRefs.includes(taskId)) {
60857
+ taskRefs.push(taskId);
60858
+ }
60859
+ }
60860
+ const templateRefPattern = /@template\/([^\s,;:!?"'()\]]+)/g;
60861
+ for (const match2 of content.matchAll(templateRefPattern)) {
60862
+ const templateName = stripTrailingPunctuation(match2[1] || "");
60863
+ if (templateName && !templateRefs.includes(templateName)) {
60864
+ templateRefs.push(templateName);
60865
+ }
60866
+ }
60867
+ return { docRefs, taskRefs, templateRefs };
60868
+ }
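A rough sketch of what `extractRefs` returns for an invented snippet, following the three patterns above (a trailing `.md` and trailing punctuation are stripped from captured refs):

```js
// Hand-traced against the regexes above; the sample text is invented.
const sample = "See @doc/specs/user-auth.md and @task-abc123, then scaffold with @template/react-component.";
extractRefs(sample);
// => {
//   docRefs: ["specs/user-auth"],      // ".md" suffix removed
//   taskRefs: ["abc123"],              // trailing comma not part of the id pattern
//   templateRefs: ["react-component"]  // trailing "." removed by stripTrailingPunctuation
// }
```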
60869
+ async function loadValidateConfig(projectRoot) {
60870
+ const config2 = await readConfig(projectRoot);
60871
+ return config2.validate || {};
60872
+ }
60873
+ function getRuleSeverity(rule, defaultSeverity, validateConfig) {
60874
+ if (validateConfig.rules?.[rule]) {
60875
+ return validateConfig.rules[rule];
60876
+ }
60877
+ return defaultSeverity;
60878
+ }
60879
+ function shouldIgnore(entity, validateConfig) {
60880
+ if (!validateConfig.ignore) return false;
60881
+ for (const pattern of validateConfig.ignore) {
60882
+ const regex = new RegExp(`^${pattern.replace(/\*\*/g, ".*").replace(/\*/g, "[^/]*")}$`);
60883
+ if (regex.test(entity)) return true;
60884
+ }
60885
+ return false;
60886
+ }
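The two helpers above read an optional `validate` block from the project config (see `loadValidateConfig`); a hypothetical config fragment and the resulting behaviour might look like this (key names from the code, values invented):

```js
// Hypothetical validate config consumed by getRuleSeverity/shouldIgnore
const validateConfig = {
  rules: {
    "task-no-ac": "off",     // disable a rule entirely
    "doc-orphan": "warning"  // or override its severity
  },
  ignore: ["task-legacy*", "docs/archive/**"]
};

getRuleSeverity("task-no-ac", "warning", validateConfig); // "off"     (config override wins)
getRuleSeverity("task-no-ac", "warning", {});             // "warning" (falls back to the default)
shouldIgnore("task-legacy01", validateConfig);            // true: "*" is turned into [^/]*
```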
60887
+ async function findSimilarDocs(projectRoot, brokenRef) {
60888
+ const docsDir = join20(projectRoot, ".knowns", "docs");
60889
+ if (!existsSync16(docsDir)) return null;
60890
+ const allDocs = [];
60891
+ async function scanDir(dir, relativePath) {
60892
+ const entries = await readdir9(dir, { withFileTypes: true });
60893
+ for (const entry of entries) {
60894
+ if (entry.name.startsWith(".")) continue;
60895
+ const entryRelPath = relativePath ? `${relativePath}/${entry.name}` : entry.name;
60896
+ if (entry.isDirectory()) {
60897
+ await scanDir(join20(dir, entry.name), entryRelPath);
60898
+ } else if (entry.name.endsWith(".md")) {
60899
+ allDocs.push(entryRelPath.replace(/\.md$/, ""));
60900
+ }
60901
+ }
60902
+ }
60903
+ await scanDir(docsDir, "");
60904
+ const brokenLower = brokenRef.toLowerCase();
60905
+ let bestMatch = null;
60906
+ let bestScore = 0;
60907
+ for (const doc of allDocs) {
60908
+ const docLower = doc.toLowerCase();
60909
+ const brokenParts = brokenLower.split(/[-_/]/);
60910
+ const docParts = docLower.split(/[-_/]/);
60911
+ let matchScore = 0;
60912
+ for (const part of brokenParts) {
60913
+ if (docLower.includes(part) && part.length > 2) {
60914
+ matchScore += part.length;
60915
+ }
60916
+ }
60917
+ for (const part of docParts) {
60918
+ if (brokenLower.includes(part) && part.length > 2) {
60919
+ matchScore += part.length;
60920
+ }
60921
+ }
60922
+ if (matchScore > bestScore) {
60923
+ bestScore = matchScore;
60924
+ bestMatch = doc;
60925
+ }
60926
+ }
60927
+ return bestScore >= 3 ? bestMatch : null;
60928
+ }
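The suggestion heuristic above is a loose token-overlap score; hand-tracing one invented pair (assuming `.knowns/docs` contains `specs/user-auth.md`, and `projectRoot` stands for the project root path) shows when a "did you mean" hint appears:

```js
// Hand-traced, invented example: broken ref "spec/user-auth" vs existing doc "specs/user-auth"
//   broken-ref parts found in the doc path: "spec" (+4), "user" (+4), "auth" (+4)
//   doc-path parts found in the broken ref:  "user" (+4), "auth" (+4); "specs" is not
//   score 20 >= 3, so the doc is offered as a suggestion
findSimilarDocs(projectRoot, "spec/user-auth"); // resolves to "specs/user-auth"
```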
60929
+ async function validateTasks(projectRoot, fileStore, validateConfig) {
60930
+ const issues = [];
60931
+ const tasks = await fileStore.getAllTasks();
60932
+ const taskIds = new Set(tasks.map((t) => t.id));
60933
+ for (const task of tasks) {
60934
+ const taskRef = `task-${task.id}`;
60935
+ if (shouldIgnore(taskRef, validateConfig)) continue;
60936
+ const content = `${task.description || ""} ${task.implementationPlan || ""} ${task.implementationNotes || ""}`;
60937
+ const { docRefs, taskRefs, templateRefs } = extractRefs(content);
60938
+ const noAcSeverity = getRuleSeverity("task-no-ac", "warning", validateConfig);
60939
+ if (noAcSeverity !== "off" && (!task.acceptanceCriteria || task.acceptanceCriteria.length === 0)) {
60940
+ issues.push({
60941
+ entity: taskRef,
60942
+ entityType: "task",
60943
+ rule: "task-no-ac",
60944
+ severity: noAcSeverity,
60945
+ message: "Task has no acceptance criteria"
60946
+ });
60947
+ }
60948
+ const noDescSeverity = getRuleSeverity("task-no-description", "warning", validateConfig);
60949
+ if (noDescSeverity !== "off" && (!task.description || task.description.trim() === "")) {
60950
+ issues.push({
60951
+ entity: taskRef,
60952
+ entityType: "task",
60953
+ rule: "task-no-description",
60954
+ severity: noDescSeverity,
60955
+ message: "Task has no description"
60956
+ });
60957
+ }
60958
+ const brokenDocSeverity = getRuleSeverity("task-broken-doc-ref", "error", validateConfig);
60959
+ if (brokenDocSeverity !== "off") {
60960
+ for (const docPath of docRefs) {
60961
+ const resolved = await resolveDoc(projectRoot, docPath);
60962
+ if (!resolved) {
60963
+ const suggestion = await findSimilarDocs(projectRoot, docPath);
60964
+ const issue2 = {
60965
+ entity: taskRef,
60966
+ entityType: "task",
60967
+ rule: "task-broken-doc-ref",
60968
+ severity: brokenDocSeverity,
60969
+ message: suggestion ? `Broken reference: @doc/${docPath} \u2192 did you mean @doc/${suggestion}?` : `Broken reference: @doc/${docPath}`,
60970
+ fixable: !!suggestion
60971
+ };
60972
+ if (suggestion) {
60973
+ issue2.fix = async () => {
60974
+ const tasksDir = join20(projectRoot, ".knowns", "tasks");
60975
+ const files = await readdir9(tasksDir);
60976
+ const taskFile = files.find((f) => f.startsWith(`task-${task.id} `));
60977
+ if (taskFile) {
60978
+ const taskFilePath = join20(tasksDir, taskFile);
60979
+ const taskContent = await readFile10(taskFilePath, "utf-8");
60980
+ const updated = taskContent.replace(
60981
+ new RegExp(`@docs?/${docPath.replace(/[.*+?^${}()|[\]\\]/g, "\\$&")}`, "g"),
60982
+ `@doc/${suggestion}`
60983
+ );
60984
+ await writeFile7(taskFilePath, updated, "utf-8");
60985
+ }
60986
+ };
60987
+ }
60988
+ issues.push(issue2);
60989
+ }
60990
+ }
60991
+ }
60992
+ const brokenTaskSeverity = getRuleSeverity("task-broken-task-ref", "error", validateConfig);
60993
+ if (brokenTaskSeverity !== "off") {
60994
+ for (const refTaskId of taskRefs) {
60995
+ if (!taskIds.has(refTaskId)) {
60996
+ issues.push({
60997
+ entity: taskRef,
60998
+ entityType: "task",
60999
+ rule: "task-broken-task-ref",
61000
+ severity: brokenTaskSeverity,
61001
+ message: `Broken reference: @task-${refTaskId}`
61002
+ });
61003
+ }
61004
+ }
61005
+ }
61006
+ const brokenTplSeverity = getRuleSeverity("task-broken-template-ref", "error", validateConfig);
61007
+ if (brokenTplSeverity !== "off") {
61008
+ for (const templateName of templateRefs) {
61009
+ const resolved = await resolveTemplate(projectRoot, templateName);
61010
+ if (!resolved) {
61011
+ issues.push({
61012
+ entity: taskRef,
61013
+ entityType: "task",
61014
+ rule: "task-broken-template-ref",
61015
+ severity: brokenTplSeverity,
61016
+ message: `Broken reference: @template/${templateName}`
61017
+ });
61018
+ }
61019
+ }
61020
+ }
61021
+ const selfRefSeverity = getRuleSeverity("task-self-ref", "warning", validateConfig);
61022
+ if (selfRefSeverity !== "off" && taskRefs.includes(task.id)) {
61023
+ issues.push({
61024
+ entity: taskRef,
61025
+ entityType: "task",
61026
+ rule: "task-self-ref",
61027
+ severity: selfRefSeverity,
61028
+ message: "Task references itself"
61029
+ });
61030
+ }
61031
+ const circularSeverity = getRuleSeverity("task-circular-parent", "error", validateConfig);
61032
+ if (circularSeverity !== "off" && task.parent) {
61033
+ const visited = /* @__PURE__ */ new Set();
61034
+ let currentId = task.parent;
61035
+ while (currentId) {
61036
+ if (visited.has(currentId) || currentId === task.id) {
61037
+ issues.push({
61038
+ entity: taskRef,
61039
+ entityType: "task",
61040
+ rule: "task-circular-parent",
61041
+ severity: circularSeverity,
61042
+ message: currentId === task.id ? "Task is its own ancestor" : "Circular parent-child relationship detected"
61043
+ });
61044
+ break;
61045
+ }
61046
+ visited.add(currentId);
61047
+ const parentTask = tasks.find((t) => t.id === currentId);
61048
+ currentId = parentTask?.parent;
61049
+ }
61050
+ }
61051
+ }
61052
+ return issues;
61053
+ }
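Each entry pushed above follows the same issue shape; an invented example of a fixable one:

```js
// Invented example of one issue object produced by validateTasks
const exampleIssue = {
  entity: "task-abc123",
  entityType: "task",
  rule: "task-broken-doc-ref",
  severity: "error",
  message: "Broken reference: @doc/spec/user-auth → did you mean @doc/specs/user-auth?",
  fixable: true,
  fix: async () => { /* rewrites the ref inside the task's markdown file */ }
};
```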
61054
+ async function validateDocs(projectRoot, fileStore, validateConfig) {
61055
+ const issues = [];
61056
+ const docsDir = join20(projectRoot, ".knowns", "docs");
61057
+ if (!existsSync16(docsDir)) return issues;
61058
+ const tasks = await fileStore.getAllTasks();
61059
+ const taskIds = new Set(tasks.map((t) => t.id));
61060
+ const referencedDocs = /* @__PURE__ */ new Set();
61061
+ for (const task of tasks) {
61062
+ const content = `${task.description || ""} ${task.implementationPlan || ""} ${task.implementationNotes || ""}`;
61063
+ const { docRefs } = extractRefs(content);
61064
+ for (const ref of docRefs) {
61065
+ referencedDocs.add(ref.toLowerCase());
61066
+ }
61067
+ }
61068
+ async function scanDir(dir, relativePath) {
61069
+ const entries = await readdir9(dir, { withFileTypes: true });
61070
+ for (const entry of entries) {
61071
+ if (entry.name.startsWith(".")) continue;
61072
+ const fullPath = join20(dir, entry.name);
61073
+ const entryRelPath = relativePath ? `${relativePath}/${entry.name}` : entry.name;
61074
+ if (entry.isDirectory()) {
61075
+ await scanDir(fullPath, entryRelPath);
61076
+ } else if (entry.name.endsWith(".md")) {
61077
+ const docPath = entryRelPath.replace(/\.md$/, "");
61078
+ const docRef = `docs/${docPath}`;
61079
+ if (shouldIgnore(docRef, validateConfig) || shouldIgnore(docPath, validateConfig)) continue;
61080
+ try {
61081
+ const content = await readFile10(fullPath, "utf-8");
61082
+ const { data, content: docContent } = (0, import_gray_matter6.default)(content);
61083
+ const noDescSeverity = getRuleSeverity("doc-no-description", "warning", validateConfig);
61084
+ if (noDescSeverity !== "off" && (!data.description || String(data.description).trim() === "")) {
61085
+ issues.push({
61086
+ entity: docRef,
61087
+ entityType: "doc",
61088
+ rule: "doc-no-description",
61089
+ severity: noDescSeverity,
61090
+ message: "Doc has no description"
61091
+ });
61092
+ }
61093
+ const orphanSeverity = getRuleSeverity("doc-orphan", "info", validateConfig);
61094
+ if (orphanSeverity !== "off" && !referencedDocs.has(docPath.toLowerCase())) {
61095
+ issues.push({
61096
+ entity: docRef,
61097
+ entityType: "doc",
61098
+ rule: "doc-orphan",
61099
+ severity: orphanSeverity,
61100
+ message: "Doc is not referenced by any task"
61101
+ });
61102
+ }
61103
+ const { docRefs, taskRefs } = extractRefs(docContent);
61104
+ const brokenDocSeverity = getRuleSeverity("doc-broken-doc-ref", "error", validateConfig);
61105
+ if (brokenDocSeverity !== "off") {
61106
+ for (const refDocPath of docRefs) {
61107
+ const resolved = await resolveDoc(projectRoot, refDocPath);
61108
+ if (!resolved) {
61109
+ const suggestion = await findSimilarDocs(projectRoot, refDocPath);
61110
+ const issue2 = {
61111
+ entity: docRef,
61112
+ entityType: "doc",
61113
+ rule: "doc-broken-doc-ref",
61114
+ severity: brokenDocSeverity,
61115
+ message: suggestion ? `Broken reference: @doc/${refDocPath} \u2192 did you mean @doc/${suggestion}?` : `Broken reference: @doc/${refDocPath}`,
61116
+ fixable: !!suggestion
61117
+ };
61118
+ if (suggestion) {
61119
+ issue2.fix = async () => {
61120
+ const docFileContent = await readFile10(fullPath, "utf-8");
61121
+ const updated = docFileContent.replace(
61122
+ new RegExp(`@docs?/${refDocPath.replace(/[.*+?^${}()|[\]\\]/g, "\\$&")}`, "g"),
61123
+ `@doc/${suggestion}`
61124
+ );
61125
+ await writeFile7(fullPath, updated, "utf-8");
61126
+ };
61127
+ }
61128
+ issues.push(issue2);
61129
+ }
61130
+ }
61131
+ }
61132
+ const brokenTaskSeverity = getRuleSeverity("doc-broken-task-ref", "error", validateConfig);
61133
+ if (brokenTaskSeverity !== "off") {
61134
+ for (const refTaskId of taskRefs) {
61135
+ if (!taskIds.has(refTaskId)) {
61136
+ const issue2 = {
61137
+ entity: docRef,
61138
+ entityType: "doc",
61139
+ rule: "doc-broken-task-ref",
61140
+ severity: brokenTaskSeverity,
61141
+ message: `Broken reference: @task-${refTaskId}`,
61142
+ fixable: true
61143
+ };
61144
+ issue2.fix = async () => {
61145
+ const docFileContent = await readFile10(fullPath, "utf-8");
61146
+ const updated = docFileContent.replace(
61147
+ new RegExp(`@task-${refTaskId}\\b`, "g"),
61148
+ `~task-${refTaskId}`
61149
+ );
61150
+ await writeFile7(fullPath, updated, "utf-8");
61151
+ };
61152
+ issues.push(issue2);
61153
+ }
61154
+ }
61155
+ }
61156
+ } catch {
61157
+ }
61158
+ }
61159
+ }
61160
+ }
61161
+ await scanDir(docsDir, "");
61162
+ return issues;
61163
+ }
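The doc-side auto-fix for a broken task reference does not delete the mention; it swaps the sigil so the reference stops resolving, roughly:

```js
// What the doc-broken-task-ref fix does to the doc body (snippet invented)
"see @task-old999 for details".replace(/@task-old999\b/g, "~task-old999");
// => "see ~task-old999 for details"
```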
61164
+ async function validateTemplates(projectRoot, validateConfig) {
61165
+ const issues = [];
61166
+ const templates = await listAllTemplates(projectRoot);
61167
+ for (const template of templates) {
61168
+ const templateRef = `templates/${template.ref}`;
61169
+ if (shouldIgnore(templateRef, validateConfig)) continue;
61170
+ try {
61171
+ const config2 = await getTemplateConfig(template.path);
61172
+ const invalidSyntaxSeverity = getRuleSeverity("template-invalid-syntax", "error", validateConfig);
61173
+ if (invalidSyntaxSeverity !== "off" && !config2) {
61174
+ issues.push({
61175
+ entity: templateRef,
61176
+ entityType: "template",
61177
+ rule: "template-invalid-syntax",
61178
+ severity: invalidSyntaxSeverity,
61179
+ message: "Failed to load template config (invalid or missing _template.yaml)"
61180
+ });
61181
+ continue;
61182
+ }
61183
+ if (!config2) continue;
61184
+ const brokenDocSeverity = getRuleSeverity("template-broken-doc-ref", "error", validateConfig);
61185
+ if (brokenDocSeverity !== "off" && config2.doc) {
61186
+ const resolved = await resolveDoc(projectRoot, config2.doc);
61187
+ if (!resolved) {
61188
+ issues.push({
61189
+ entity: templateRef,
61190
+ entityType: "template",
61191
+ rule: "template-broken-doc-ref",
61192
+ severity: brokenDocSeverity,
61193
+ message: `Broken doc reference: @doc/${config2.doc}`
61194
+ });
61195
+ }
61196
+ }
61197
+ if (invalidSyntaxSeverity !== "off") {
61198
+ for (const action of config2.actions || []) {
61199
+ if (action.type === "add" && action.template) {
61200
+ const templateFilePath = join20(template.path, action.template);
61201
+ if (existsSync16(templateFilePath)) {
61202
+ try {
61203
+ const templateContent = await readFile10(templateFilePath, "utf-8");
61204
+ import_handlebars2.default.compile(templateContent);
61205
+ } catch (err) {
61206
+ issues.push({
61207
+ entity: templateRef,
61208
+ entityType: "template",
61209
+ rule: "template-invalid-syntax",
61210
+ severity: invalidSyntaxSeverity,
61211
+ message: `Invalid Handlebars syntax in ${action.template}: ${err instanceof Error ? err.message : "unknown error"}`
61212
+ });
61213
+ }
61214
+ }
61215
+ }
61216
+ }
61217
+ }
61218
+ const missingPartialSeverity = getRuleSeverity("template-missing-partial", "error", validateConfig);
61219
+ if (missingPartialSeverity !== "off" && existsSync16(template.path)) {
61220
+ const files = await readdir9(template.path);
61221
+ const hbsFiles = files.filter((f) => f.endsWith(".hbs"));
61222
+ for (const hbsFile of hbsFiles) {
61223
+ const content = await readFile10(join20(template.path, hbsFile), "utf-8");
61224
+ const partialPattern = /\{\{>\s*([^\s}]+)\s*\}\}/g;
61225
+ for (const match2 of content.matchAll(partialPattern)) {
61226
+ const partialName = match2[1];
61227
+ const partialPath = join20(template.path, `_${partialName}.hbs`);
61228
+ if (!existsSync16(partialPath)) {
61229
+ issues.push({
61230
+ entity: templateRef,
61231
+ entityType: "template",
61232
+ rule: "template-missing-partial",
61233
+ severity: missingPartialSeverity,
61234
+ message: `Missing partial: ${partialName} (expected at _${partialName}.hbs)`
61235
+ });
61236
+ }
61237
+ }
61238
+ }
61239
+ }
61240
+ } catch (err) {
61241
+ const invalidSyntaxSeverity = getRuleSeverity("template-invalid-syntax", "error", validateConfig);
61242
+ if (invalidSyntaxSeverity !== "off") {
61243
+ issues.push({
61244
+ entity: templateRef,
61245
+ entityType: "template",
61246
+ rule: "template-invalid-syntax",
61247
+ severity: invalidSyntaxSeverity,
61248
+ message: `Failed to load template config: ${err instanceof Error ? err.message : "unknown error"}`
61249
+ });
61250
+ }
61251
+ }
61252
+ }
61253
+ return issues;
61254
+ }
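The partial check above only looks for underscore-prefixed `.hbs` files next to the template; a quick trace of the pattern (file names invented):

```js
// Partials referenced as {{> name}} must exist as _<name>.hbs in the same template dir
const partialPattern = /\{\{>\s*([^\s}]+)\s*\}\}/g;
[..."{{> header}} ... {{>footer}}".matchAll(partialPattern)].map((m) => m[1]);
// => ["header", "footer"]  -> expects _header.hbs and _footer.hbs alongside the template
```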
61255
+ async function applyFixes(issues) {
+ const results = [];
+ const fixableIssues = issues.filter((i) => i.fixable && i.fix);
+ for (const issue2 of fixableIssues) {
+ try {
+ await issue2.fix?.();
+ results.push({
+ entity: issue2.entity,
+ rule: issue2.rule,
+ action: issue2.message,
+ success: true
+ });
+ } catch (err) {
+ results.push({
+ entity: issue2.entity,
+ rule: issue2.rule,
+ action: `Failed: ${err instanceof Error ? err.message : "unknown error"}`,
+ success: false
+ });
+ }
+ }
+ return results;
+ }
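`applyFixes` only touches issues that carry both `fixable: true` and a `fix` callback, awaiting each callback and recording a per-issue success flag. The issue shape itself is not exported by the bundle; the interface below is a reader's reconstruction from how `applyFixes` and `handleValidate` consume it, and the example rule name and fix body are hypothetical.

```ts
// Reconstructed shape of a fixable issue; field names mirror the bundled code,
// but the interface and the example are illustrative, not part of the package API.
interface ValidationIssue {
  entity: string;
  entityType: "task" | "doc" | "template";
  rule: string;
  severity: "error" | "warning" | "info";
  message: string;
  fixable?: boolean;
  fix?: () => Promise<void>;
}

// Hypothetical fixable issue: applyFixes would await fix() and report success/failure.
const example: ValidationIssue = {
  entity: "task-42",
  entityType: "task",
  rule: "task-broken-doc-ref", // hypothetical rule name, for illustration only
  severity: "warning",
  message: "Broken doc reference: @doc/guides/missing",
  fixable: true,
  fix: async () => {
    // ...rewrite or drop the stale reference here
  },
};
```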
+ async function runSDDValidation(projectRoot, fileStore) {
+ const tasks = await fileStore.getAllTasks();
+ const docsDir = join20(projectRoot, ".knowns", "docs");
+ const stats = {
+ specs: { total: 0, approved: 0, draft: 0 },
+ tasks: { total: tasks.length, done: 0, inProgress: 0, todo: 0, withSpec: 0, withoutSpec: 0 },
+ coverage: { linked: 0, total: tasks.length, percent: 0 },
+ acCompletion: {}
+ };
+ const warnings = [];
+ const passed = [];
+ for (const task of tasks) {
+ if (task.status === "done") stats.tasks.done++;
+ else if (task.status === "in-progress") stats.tasks.inProgress++;
+ else stats.tasks.todo++;
+ if (task.spec) {
+ stats.tasks.withSpec++;
+ } else {
+ stats.tasks.withoutSpec++;
+ warnings.push({
+ type: "task-no-spec",
+ entity: `task-${task.id}`,
+ message: `${task.title} has no spec reference`
+ });
+ }
+ }
+ stats.coverage.linked = stats.tasks.withSpec;
+ stats.coverage.percent = stats.tasks.total > 0 ? Math.round(stats.tasks.withSpec / stats.tasks.total * 100) : 0;
+ const specsDir = join20(docsDir, "specs");
+ if (existsSync16(specsDir)) {
+ async function scanSpecs(dir, relativePath) {
+ const entries = await readdir9(dir, { withFileTypes: true });
+ for (const entry of entries) {
+ if (entry.name.startsWith(".")) continue;
+ const entryRelPath = relativePath ? `${relativePath}/${entry.name}` : entry.name;
+ if (entry.isDirectory()) {
+ await scanSpecs(join20(dir, entry.name), entryRelPath);
+ } else if (entry.name.endsWith(".md")) {
+ stats.specs.total++;
+ const specPath = `specs/${entryRelPath.replace(/\.md$/, "")}`;
+ try {
+ const content = await readFile10(join20(dir, entry.name), "utf-8");
+ const { data } = (0, import_gray_matter6.default)(content);
+ if (data.status === "approved" || data.status === "implemented") {
+ stats.specs.approved++;
+ } else {
+ stats.specs.draft++;
+ }
+ const linkedTasks = tasks.filter((t) => t.spec === specPath);
+ if (linkedTasks.length > 0) {
+ let totalAC = 0;
+ let completedAC = 0;
+ for (const task of linkedTasks) {
+ totalAC += task.acceptanceCriteria.length;
+ completedAC += task.acceptanceCriteria.filter((ac) => ac.completed).length;
+ }
+ const percent = totalAC > 0 ? Math.round(completedAC / totalAC * 100) : 100;
+ stats.acCompletion[specPath] = { total: totalAC, completed: completedAC, percent };
+ if (percent < 100 && totalAC > 0) {
+ warnings.push({
+ type: "spec-ac-incomplete",
+ entity: specPath,
+ message: `${completedAC}/${totalAC} ACs complete (${percent}%)`
+ });
+ }
+ }
+ } catch {
+ }
+ }
+ }
+ }
+ await scanSpecs(specsDir, "");
+ }
+ for (const task of tasks) {
+ if (task.spec) {
+ const specDocPath = join20(docsDir, `${task.spec}.md`);
+ if (!existsSync16(specDocPath)) {
+ warnings.push({
+ type: "spec-broken-link",
+ entity: `task-${task.id}`,
+ message: `Broken spec reference: @doc/${task.spec}`
+ });
+ }
+ }
+ }
+ if (warnings.filter((w) => w.type === "spec-broken-link").length === 0) {
+ passed.push("All spec references resolve");
+ }
+ for (const [specPath, completion] of Object.entries(stats.acCompletion)) {
+ if (completion.percent === 100) {
+ passed.push(`${specPath}: fully implemented`);
+ }
+ }
+ return { stats, warnings, passed };
+ }
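The SDD report is mostly counting: spec coverage is the share of tasks that carry a `spec` reference, and per-spec acceptance-criteria (AC) completion is completed ACs over total ACs across the tasks linked to that spec, defaulting to 100% when a spec has no ACs at all. A minimal sketch of the same arithmetic, assuming a task shape with `spec` and `acceptanceCriteria` fields as used above (the `TaskLike` type is an assumption, not an exported type):

```ts
// Same ratios as runSDDValidation, extracted for readability.
interface TaskLike {
  spec?: string;
  acceptanceCriteria: { completed: boolean }[];
}

// Percentage of tasks that reference a spec (coverage.percent in the report).
function specCoveragePercent(tasks: TaskLike[]): number {
  const withSpec = tasks.filter((t) => t.spec).length;
  return tasks.length > 0 ? Math.round((withSpec / tasks.length) * 100) : 0;
}

// AC completion for the tasks linked to one spec; 100 when there are no ACs.
function acCompletionPercent(linkedTasks: TaskLike[]): number {
  const criteria = linkedTasks.flatMap((t) => t.acceptanceCriteria);
  const completed = criteria.filter((ac) => ac.completed).length;
  return criteria.length > 0 ? Math.round((completed / criteria.length) * 100) : 100;
}
```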
+ async function handleValidate(args, fileStore) {
+ try {
+ const projectRoot = getProjectRoot();
+ if (args?.scope === "sdd") {
+ const sddResult = await runSDDValidation(projectRoot, fileStore);
+ return successResponse({
+ mode: "sdd",
+ stats: sddResult.stats,
+ warnings: sddResult.warnings,
+ passed: sddResult.passed
+ });
+ }
+ const validateConfig = await loadValidateConfig(projectRoot);
+ const allIssues = [];
+ const stats = { tasks: 0, docs: 0, templates: 0 };
+ if (!args?.type || args.type === "task") {
+ const tasks = await fileStore.getAllTasks();
+ stats.tasks = tasks.length;
+ const taskIssues = await validateTasks(projectRoot, fileStore, validateConfig);
+ allIssues.push(...taskIssues);
+ }
+ if (!args?.type || args.type === "doc") {
+ const docsDir = join20(projectRoot, ".knowns", "docs");
+ if (existsSync16(docsDir)) {
+ async function countDocs(dir) {
+ let count = 0;
+ const entries = await readdir9(dir, { withFileTypes: true });
+ for (const entry of entries) {
+ if (entry.name.startsWith(".")) continue;
+ if (entry.isDirectory()) {
+ count += await countDocs(join20(dir, entry.name));
+ } else if (entry.name.endsWith(".md")) {
+ count++;
+ }
+ }
+ return count;
+ }
+ stats.docs = await countDocs(docsDir);
+ }
+ const docIssues = await validateDocs(projectRoot, fileStore, validateConfig);
+ allIssues.push(...docIssues);
+ }
+ if (!args?.type || args.type === "template") {
+ const templates = await listAllTemplates(projectRoot);
+ stats.templates = templates.length;
+ const templateIssues = await validateTemplates(projectRoot, validateConfig);
+ allIssues.push(...templateIssues);
+ }
+ if (args?.strict) {
+ for (const issue2 of allIssues) {
+ if (issue2.severity === "warning") {
+ issue2.severity = "error";
+ }
+ }
+ }
+ let fixes = [];
+ if (args?.fix) {
+ fixes = await applyFixes(allIssues);
+ }
+ const errors = allIssues.filter((i) => i.severity === "error");
+ const warnings = allIssues.filter((i) => i.severity === "warning");
+ const infos = allIssues.filter((i) => i.severity === "info");
+ return successResponse({
+ valid: errors.length === 0,
+ stats,
+ summary: {
+ errors: errors.length,
+ warnings: warnings.length,
+ info: infos.length
+ },
+ issues: allIssues.map((i) => ({
+ entity: i.entity,
+ entityType: i.entityType,
+ rule: i.rule,
+ severity: i.severity,
+ message: i.message,
+ fixable: i.fixable || false
+ })),
+ ...args?.fix && fixes.length > 0 ? { fixes } : {}
+ });
+ } catch (error48) {
+ return errorResponse(error48 instanceof Error ? error48.message : String(error48));
+ }
+ }
+
  // src/mcp/server.ts
  var fileStoreCache = /* @__PURE__ */ new Map();
  function getFileStore() {
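`handleValidate` is the entry point the new MCP tool dispatches to. As read from the code above, it accepts an optional scope, an entity-type filter, a strict flag, and a fix flag, and returns either an SDD report or a `valid`/`stats`/`summary`/`issues` payload (plus `fixes` when fixes were applied). The argument type is not exported by the package, so the interface below is only a reader's reconstruction:

```ts
// Reconstructed from handleValidate's usage of `args`; not an exported type.
interface ValidateArgs {
  scope?: "sdd";                      // return the SDD report instead of rule results
  type?: "task" | "doc" | "template"; // restrict validation to one entity type
  strict?: boolean;                   // promote warnings to errors
  fix?: boolean;                      // run applyFixes on fixable issues
}
```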
@@ -60796,7 +61485,8 @@ var tools = [
  ...boardTools,
  ...docTools,
  ...templateTools,
- ...searchTools
+ ...searchTools,
+ ...validateTools
  ];
  server.setRequestHandler(ListToolsRequestSchema, async () => {
  return { tools };
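The only change in this hunk is that `validateTools` joins the tool list the server advertises. The actual definitions live in the `validateTools` export, which this diff does not show; based on `handleValidate`'s arguments, a plausible but unverified shape for the `validate` tool would be:

```ts
// Guessed tool definition; the real schema ships in the validateTools export,
// outside this hunk, so treat every field here as an assumption.
const validateToolSketch = {
  name: "validate",
  description: "Validate tasks, docs, and templates against configured rules",
  inputSchema: {
    type: "object",
    properties: {
      scope: { type: "string", enum: ["sdd"] },
      type: { type: "string", enum: ["task", "doc", "template"] },
      strict: { type: "boolean" },
      fix: { type: "boolean" },
    },
  },
} as const;
```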
@@ -60860,6 +61550,9 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
  // Unified search handler
  case "search":
  return await handleSearch(args, getFileStore());
+ // Validate handler
+ case "validate":
+ return await handleValidate(args, getFileStore());
  default:
  return errorResponse(`Unknown tool: ${name}`);
  }
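With the `validate` case wired into the request handler, any MCP client can call the tool by name. The sketch below uses the TypeScript MCP SDK client; the command used to spawn the knowns server is an assumption and may differ from the package's documented CLI:

```ts
// Sketch of an end-to-end call; the spawn command ("knowns mcp") is assumed —
// check the package README for the actual way to start the MCP server.
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";

async function main() {
  const transport = new StdioClientTransport({ command: "knowns", args: ["mcp"] });
  const client = new Client({ name: "validate-demo", version: "0.0.1" });
  await client.connect(transport);

  // Ask the server to validate templates only, treating warnings as errors.
  const result = await client.callTool({
    name: "validate",
    arguments: { type: "template", strict: true },
  });
  console.log(JSON.stringify(result.content, null, 2));

  await client.close();
}

main().catch(console.error);
```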
@@ -60869,7 +61562,7 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
  });
  server.setRequestHandler(ListResourcesRequestSchema, async () => {
  const tasks = await getFileStore().getAllTasks();
- const docsDir = join20(getProjectRoot(), ".knowns", "docs");
+ const docsDir = join21(getProjectRoot(), ".knowns", "docs");
  const taskResources = tasks.map((task) => ({
  uri: `knowns://task/${task.id}`,
  name: task.title,
@@ -60877,14 +61570,14 @@ server.setRequestHandler(ListResourcesRequestSchema, async () => {
  description: `Task #${task.id}: ${task.title}`
  }));
  const docResources = [];
- if (existsSync16(docsDir)) {
- const { readdir: readdir9 } = await import("node:fs/promises");
+ if (existsSync17(docsDir)) {
+ const { readdir: readdir10 } = await import("node:fs/promises");
  async function getAllMdFiles3(dir, basePath = "") {
  const files = [];
- const entries = await readdir9(dir, { withFileTypes: true });
+ const entries = await readdir10(dir, { withFileTypes: true });
  for (const entry of entries) {
- const fullPath = join20(dir, entry.name);
- const relativePath = normalizePath(basePath ? join20(basePath, entry.name) : entry.name);
+ const fullPath = join21(dir, entry.name);
+ const relativePath = normalizePath(basePath ? join21(basePath, entry.name) : entry.name);
  if (entry.isDirectory()) {
  const subFiles = await getAllMdFiles3(fullPath, relativePath);
  files.push(...subFiles);
@@ -60896,9 +61589,9 @@ server.setRequestHandler(ListResourcesRequestSchema, async () => {
  }
  const mdFiles = await getAllMdFiles3(docsDir);
  for (const file3 of mdFiles) {
- const filepath = join20(docsDir, file3);
- const content = await readFile10(filepath, "utf-8");
- const { data } = (0, import_gray_matter6.default)(content);
+ const filepath = join21(docsDir, file3);
+ const content = await readFile11(filepath, "utf-8");
+ const { data } = (0, import_gray_matter7.default)(content);
  docResources.push({
  uri: `knowns://doc/${file3.replace(/\.md$/, "")}`,
  name: data.title || file3.replace(/\.md$/, ""),
@@ -60933,13 +61626,13 @@ server.setRequestHandler(ReadResourceRequestSchema, async (request) => {
  const docMatch = uri.match(/^knowns:\/\/doc\/(.+)$/);
  if (docMatch) {
  const docPath = docMatch[1];
- const docsDir = join20(getProjectRoot(), ".knowns", "docs");
- const filepath = join20(docsDir, `${docPath}.md`);
- if (!existsSync16(filepath)) {
+ const docsDir = join21(getProjectRoot(), ".knowns", "docs");
+ const filepath = join21(docsDir, `${docPath}.md`);
+ if (!existsSync17(filepath)) {
  throw new Error(`Documentation ${docPath} not found`);
  }
- const content = await readFile10(filepath, "utf-8");
- const { data, content: docContent } = (0, import_gray_matter6.default)(content);
+ const content = await readFile11(filepath, "utf-8");
+ const { data, content: docContent } = (0, import_gray_matter7.default)(content);
  return {
  contents: [
  {