knowns 0.10.3 → 0.10.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +2 -0
- package/dist/index.js +10974 -8682
- package/dist/mcp/server.js +376 -241
- package/dist/ui/assets/index-B1mpVDN3.css +1 -0
- package/dist/ui/assets/{index-Djj_i5GU.js → index-NgD_-y0O.js} +132 -132
- package/dist/ui/index.html +2 -2
- package/package.json +4 -2
- package/dist/ui/assets/index-DyaL4XLd.css +0 -1
package/dist/mcp/server.js
CHANGED
|
@@ -10289,25 +10289,25 @@ var require_gray_matter = __commonJS({
|
|
|
10289
10289
|
});
|
|
10290
10290
|
|
|
10291
10291
|
// src/import/config.ts
|
|
10292
|
-
import { existsSync as
|
|
10293
|
-
import { mkdir as mkdir3, readFile as
|
|
10294
|
-
import { dirname as dirname2, join as
|
|
10292
|
+
import { existsSync as existsSync5 } from "node:fs";
|
|
10293
|
+
import { mkdir as mkdir3, readFile as readFile4, writeFile as writeFile3 } from "node:fs/promises";
|
|
10294
|
+
import { dirname as dirname2, join as join8 } from "node:path";
|
|
10295
10295
|
function getKnownsDir(projectRoot) {
|
|
10296
|
-
return
|
|
10296
|
+
return join8(projectRoot, ".knowns");
|
|
10297
10297
|
}
|
|
10298
10298
|
function getConfigPath(projectRoot) {
|
|
10299
|
-
return
|
|
10299
|
+
return join8(getKnownsDir(projectRoot), CONFIG_FILE);
|
|
10300
10300
|
}
|
|
10301
10301
|
function getImportsDir(projectRoot) {
|
|
10302
|
-
return
|
|
10302
|
+
return join8(getKnownsDir(projectRoot), IMPORTS_DIR);
|
|
10303
10303
|
}
|
|
10304
10304
|
async function readConfig(projectRoot) {
|
|
10305
10305
|
const configPath = getConfigPath(projectRoot);
|
|
10306
|
-
if (!
|
|
10306
|
+
if (!existsSync5(configPath)) {
|
|
10307
10307
|
return {};
|
|
10308
10308
|
}
|
|
10309
10309
|
try {
|
|
10310
|
-
const content = await
|
|
10310
|
+
const content = await readFile4(configPath, "utf-8");
|
|
10311
10311
|
return JSON.parse(content);
|
|
10312
10312
|
} catch {
|
|
10313
10313
|
return {};
|
|
@@ -13333,7 +13333,7 @@ var require_util2 = __commonJS({
|
|
|
13333
13333
|
return path2;
|
|
13334
13334
|
}
|
|
13335
13335
|
exports2.normalize = normalize2;
|
|
13336
|
-
function
|
|
13336
|
+
function join20(aRoot, aPath) {
|
|
13337
13337
|
if (aRoot === "") {
|
|
13338
13338
|
aRoot = ".";
|
|
13339
13339
|
}
|
|
@@ -13365,7 +13365,7 @@ var require_util2 = __commonJS({
|
|
|
13365
13365
|
}
|
|
13366
13366
|
return joined;
|
|
13367
13367
|
}
|
|
13368
|
-
exports2.join =
|
|
13368
|
+
exports2.join = join20;
|
|
13369
13369
|
exports2.isAbsolute = function(aPath) {
|
|
13370
13370
|
return aPath.charAt(0) === "/" || urlRegexp.test(aPath);
|
|
13371
13371
|
};
|
|
@@ -13538,7 +13538,7 @@ var require_util2 = __commonJS({
|
|
|
13538
13538
|
parsed.path = parsed.path.substring(0, index + 1);
|
|
13539
13539
|
}
|
|
13540
13540
|
}
|
|
13541
|
-
sourceURL =
|
|
13541
|
+
sourceURL = join20(urlGenerate(parsed), sourceURL);
|
|
13542
13542
|
}
|
|
13543
13543
|
return normalize2(sourceURL);
|
|
13544
13544
|
}
|
|
@@ -28653,9 +28653,9 @@ var require_prompts3 = __commonJS({
|
|
|
28653
28653
|
});
|
|
28654
28654
|
|
|
28655
28655
|
// src/mcp/server.ts
|
|
28656
|
-
import { existsSync as
|
|
28657
|
-
import { readFile as
|
|
28658
|
-
import { join as
|
|
28656
|
+
import { existsSync as existsSync15 } from "node:fs";
|
|
28657
|
+
import { readFile as readFile10 } from "node:fs/promises";
|
|
28658
|
+
import { join as join19 } from "node:path";
|
|
28659
28659
|
|
|
28660
28660
|
// node_modules/zod/v3/helpers/util.js
|
|
28661
28661
|
var util;
|
|
@@ -50174,7 +50174,7 @@ var FileStore = class {
|
|
|
50174
50174
|
}
|
|
50175
50175
|
const oldParent = task.parent;
|
|
50176
50176
|
const newParent = updates.parent;
|
|
50177
|
-
if (oldParent !== newParent) {
|
|
50177
|
+
if ("parent" in updates && oldParent !== newParent) {
|
|
50178
50178
|
if (oldParent) {
|
|
50179
50179
|
await this.removeSubtask(oldParent, id);
|
|
50180
50180
|
}
|
|
@@ -50422,6 +50422,11 @@ function findProjectRoot(startPath = process.cwd()) {
|
|
|
50422
50422
|
return null;
|
|
50423
50423
|
}
|
|
50424
50424
|
|
|
50425
|
+
// src/utils/normalize-id.ts
|
|
50426
|
+
function normalizeTaskId(input) {
|
|
50427
|
+
return input.replace(/^task-/i, "");
|
|
50428
|
+
}
|
|
50429
|
+
|
|
50425
50430
|
// src/utils/index.ts
|
|
50426
50431
|
function normalizePath(filePath) {
|
|
50427
50432
|
return filePath.replace(/\\/g, "/");
|
|
@@ -50805,9 +50810,10 @@ async function handleCreateTask(args, fileStore2) {
|
|
|
50805
50810
|
}
|
|
50806
50811
|
async function handleGetTask(args, fileStore2) {
|
|
50807
50812
|
const input = getTaskSchema.parse(args);
|
|
50808
|
-
const
|
|
50813
|
+
const taskId = normalizeTaskId(input.taskId);
|
|
50814
|
+
const task = await fileStore2.getTask(taskId);
|
|
50809
50815
|
if (!task) {
|
|
50810
|
-
return errorResponse(`Task ${
|
|
50816
|
+
return errorResponse(`Task ${taskId} not found`);
|
|
50811
50817
|
}
|
|
50812
50818
|
const linkedDocs = await fetchLinkedDocs(task);
|
|
50813
50819
|
return successResponse({
|
|
@@ -50830,9 +50836,10 @@ async function handleGetTask(args, fileStore2) {
|
|
|
50830
50836
|
}
|
|
50831
50837
|
async function handleUpdateTask(args, fileStore2) {
|
|
50832
50838
|
const input = updateTaskSchema.parse(args);
|
|
50833
|
-
const
|
|
50839
|
+
const taskId = normalizeTaskId(input.taskId);
|
|
50840
|
+
const currentTask = await fileStore2.getTask(taskId);
|
|
50834
50841
|
if (!currentTask) {
|
|
50835
|
-
return errorResponse(`Task ${
|
|
50842
|
+
return errorResponse(`Task ${taskId} not found`);
|
|
50836
50843
|
}
|
|
50837
50844
|
const updates = {};
|
|
50838
50845
|
if (input.title) updates.title = input.title;
|
|
@@ -50893,7 +50900,7 @@ async function handleUpdateTask(args, fileStore2) {
|
|
|
50893
50900
|
const separator = existingNotes ? "\n\n" : "";
|
|
50894
50901
|
updates.implementationNotes = existingNotes + separator + input.appendNotes;
|
|
50895
50902
|
}
|
|
50896
|
-
const task = await fileStore2.updateTask(
|
|
50903
|
+
const task = await fileStore2.updateTask(taskId, updates);
|
|
50897
50904
|
await notifyTaskUpdate(task.id);
|
|
50898
50905
|
return successResponse({
|
|
50899
50906
|
task: {
|
|
@@ -50953,6 +50960,9 @@ async function handleSearchTasks(args, fileStore2) {
|
|
|
50953
50960
|
}
|
|
50954
50961
|
|
|
50955
50962
|
// src/mcp/handlers/time.ts
|
|
50963
|
+
import { existsSync as existsSync4 } from "node:fs";
|
|
50964
|
+
import { readFile as readFile3, writeFile as writeFile2 } from "node:fs/promises";
|
|
50965
|
+
import { join as join7 } from "node:path";
|
|
50956
50966
|
var startTimeSchema = external_exports3.object({
|
|
50957
50967
|
taskId: external_exports3.string()
|
|
50958
50968
|
});
|
|
@@ -51034,93 +51044,117 @@ var timeTools = [
|
|
|
51034
51044
|
}
|
|
51035
51045
|
}
|
|
51036
51046
|
];
|
|
51047
|
+
async function loadTimeData(projectRoot) {
|
|
51048
|
+
const timePath = join7(projectRoot, ".knowns", "time.json");
|
|
51049
|
+
if (!existsSync4(timePath)) {
|
|
51050
|
+
return { active: [] };
|
|
51051
|
+
}
|
|
51052
|
+
const content = await readFile3(timePath, "utf-8");
|
|
51053
|
+
const data = JSON.parse(content);
|
|
51054
|
+
if (data.active && !Array.isArray(data.active)) {
|
|
51055
|
+
return { active: [data.active] };
|
|
51056
|
+
}
|
|
51057
|
+
if (data.active === null) {
|
|
51058
|
+
return { active: [] };
|
|
51059
|
+
}
|
|
51060
|
+
return data;
|
|
51061
|
+
}
|
|
51062
|
+
async function saveTimeData(projectRoot, data) {
|
|
51063
|
+
const timePath = join7(projectRoot, ".knowns", "time.json");
|
|
51064
|
+
await writeFile2(timePath, JSON.stringify(data, null, 2), "utf-8");
|
|
51065
|
+
}
|
|
51037
51066
|
async function handleStartTime(args, fileStore2) {
|
|
51038
51067
|
const input = startTimeSchema.parse(args);
|
|
51039
|
-
const
|
|
51068
|
+
const taskId = normalizeTaskId(input.taskId);
|
|
51069
|
+
const task = await fileStore2.getTask(taskId);
|
|
51040
51070
|
if (!task) {
|
|
51041
|
-
return errorResponse(`Task ${
|
|
51071
|
+
return errorResponse(`Task ${taskId} not found`);
|
|
51042
51072
|
}
|
|
51043
|
-
const
|
|
51044
|
-
|
|
51045
|
-
|
|
51073
|
+
const data = await loadTimeData(fileStore2.projectRoot);
|
|
51074
|
+
const existingTimer = data.active.find((t) => t.taskId === taskId);
|
|
51075
|
+
if (existingTimer) {
|
|
51076
|
+
return errorResponse(`Timer already running for task ${taskId}`);
|
|
51046
51077
|
}
|
|
51047
|
-
const
|
|
51048
|
-
|
|
51049
|
-
startedAt: /* @__PURE__ */ new Date(),
|
|
51050
|
-
duration: 0,
|
|
51051
|
-
note: "Started via MCP"
|
|
51052
|
-
};
|
|
51053
|
-
await fileStore2.updateTask(input.taskId, {
|
|
51054
|
-
timeEntries: [...task.timeEntries, newEntry]
|
|
51055
|
-
});
|
|
51056
|
-
await notifyTaskUpdate(input.taskId);
|
|
51057
|
-
await notifyTimeUpdate({
|
|
51058
|
-
taskId: input.taskId,
|
|
51078
|
+
const newTimer = {
|
|
51079
|
+
taskId,
|
|
51059
51080
|
taskTitle: task.title,
|
|
51060
|
-
startedAt:
|
|
51081
|
+
startedAt: (/* @__PURE__ */ new Date()).toISOString(),
|
|
51061
51082
|
pausedAt: null,
|
|
51062
51083
|
totalPausedMs: 0
|
|
51063
|
-
}
|
|
51084
|
+
};
|
|
51085
|
+
data.active.push(newTimer);
|
|
51086
|
+
await saveTimeData(fileStore2.projectRoot, data);
|
|
51087
|
+
await notifyTaskUpdate(taskId);
|
|
51088
|
+
await notifyTimeUpdate(data.active);
|
|
51064
51089
|
return successResponse({
|
|
51065
|
-
message: `Started tracking time for task ${
|
|
51066
|
-
startedAt:
|
|
51090
|
+
message: `Started tracking time for task ${taskId}`,
|
|
51091
|
+
startedAt: newTimer.startedAt,
|
|
51092
|
+
activeTimers: data.active.length
|
|
51067
51093
|
});
|
|
51068
51094
|
}
|
|
51069
51095
|
async function handleStopTime(args, fileStore2) {
|
|
51070
51096
|
const input = stopTimeSchema.parse(args);
|
|
51071
|
-
const
|
|
51097
|
+
const taskId = normalizeTaskId(input.taskId);
|
|
51098
|
+
const task = await fileStore2.getTask(taskId);
|
|
51072
51099
|
if (!task) {
|
|
51073
|
-
return errorResponse(`Task ${
|
|
51074
|
-
}
|
|
51075
|
-
const
|
|
51076
|
-
|
|
51077
|
-
|
|
51078
|
-
|
|
51079
|
-
|
|
51080
|
-
const
|
|
51081
|
-
const
|
|
51082
|
-
const
|
|
51083
|
-
|
|
51084
|
-
|
|
51100
|
+
return errorResponse(`Task ${taskId} not found`);
|
|
51101
|
+
}
|
|
51102
|
+
const data = await loadTimeData(fileStore2.projectRoot);
|
|
51103
|
+
const timerIndex = data.active.findIndex((t) => t.taskId === taskId);
|
|
51104
|
+
if (timerIndex === -1) {
|
|
51105
|
+
return errorResponse(`No active timer for task ${taskId}`);
|
|
51106
|
+
}
|
|
51107
|
+
const timer = data.active[timerIndex];
|
|
51108
|
+
const { startedAt, pausedAt, totalPausedMs } = timer;
|
|
51109
|
+
const endTime = pausedAt ? new Date(pausedAt) : /* @__PURE__ */ new Date();
|
|
51110
|
+
const elapsed = endTime.getTime() - new Date(startedAt).getTime() - totalPausedMs;
|
|
51111
|
+
const duration3 = Math.floor(elapsed / 1e3);
|
|
51112
|
+
const newEntry = {
|
|
51113
|
+
id: `te-${Date.now()}-${taskId}`,
|
|
51114
|
+
startedAt: new Date(startedAt),
|
|
51085
51115
|
endedAt: endTime,
|
|
51086
51116
|
duration: duration3
|
|
51087
51117
|
};
|
|
51088
51118
|
const newTimeSpent = task.timeSpent + duration3;
|
|
51089
|
-
await fileStore2.updateTask(
|
|
51090
|
-
timeEntries:
|
|
51119
|
+
await fileStore2.updateTask(taskId, {
|
|
51120
|
+
timeEntries: [...task.timeEntries, newEntry],
|
|
51091
51121
|
timeSpent: newTimeSpent
|
|
51092
51122
|
});
|
|
51093
|
-
|
|
51094
|
-
await
|
|
51123
|
+
data.active.splice(timerIndex, 1);
|
|
51124
|
+
await saveTimeData(fileStore2.projectRoot, data);
|
|
51125
|
+
await notifyTaskUpdate(taskId);
|
|
51126
|
+
await notifyTimeUpdate(data.active.length > 0 ? data.active : null);
|
|
51095
51127
|
return successResponse({
|
|
51096
|
-
message: `Stopped tracking time for task ${
|
|
51128
|
+
message: `Stopped tracking time for task ${taskId}`,
|
|
51097
51129
|
duration: formatDuration(duration3),
|
|
51098
|
-
totalTime: formatDuration(newTimeSpent)
|
|
51130
|
+
totalTime: formatDuration(newTimeSpent),
|
|
51131
|
+
activeTimers: data.active.length
|
|
51099
51132
|
});
|
|
51100
51133
|
}
|
|
51101
51134
|
async function handleAddTime(args, fileStore2) {
|
|
51102
51135
|
const input = addTimeSchema.parse(args);
|
|
51103
|
-
const
|
|
51136
|
+
const taskId = normalizeTaskId(input.taskId);
|
|
51137
|
+
const task = await fileStore2.getTask(taskId);
|
|
51104
51138
|
if (!task) {
|
|
51105
|
-
return errorResponse(`Task ${
|
|
51139
|
+
return errorResponse(`Task ${taskId} not found`);
|
|
51106
51140
|
}
|
|
51107
51141
|
const duration3 = parseDuration(input.duration);
|
|
51108
51142
|
const startDate = input.date ? new Date(input.date) : /* @__PURE__ */ new Date();
|
|
51109
51143
|
const newEntry = {
|
|
51110
|
-
id: `
|
|
51144
|
+
id: `te-${Date.now()}`,
|
|
51111
51145
|
startedAt: startDate,
|
|
51112
51146
|
endedAt: new Date(startDate.getTime() + duration3 * 1e3),
|
|
51113
51147
|
duration: duration3,
|
|
51114
51148
|
note: input.note || "Added via MCP"
|
|
51115
51149
|
};
|
|
51116
51150
|
const newTimeSpent = task.timeSpent + duration3;
|
|
51117
|
-
await fileStore2.updateTask(
|
|
51151
|
+
await fileStore2.updateTask(taskId, {
|
|
51118
51152
|
timeEntries: [...task.timeEntries, newEntry],
|
|
51119
51153
|
timeSpent: newTimeSpent
|
|
51120
51154
|
});
|
|
51121
|
-
await notifyTaskUpdate(
|
|
51155
|
+
await notifyTaskUpdate(taskId);
|
|
51122
51156
|
return successResponse({
|
|
51123
|
-
message: `Added ${formatDuration(duration3)} to task ${
|
|
51157
|
+
message: `Added ${formatDuration(duration3)} to task ${taskId}`,
|
|
51124
51158
|
totalTime: formatDuration(newTimeSpent)
|
|
51125
51159
|
});
|
|
51126
51160
|
}
|
|
@@ -51242,9 +51276,9 @@ async function handleGetBoard(fileStore2) {
|
|
|
51242
51276
|
}
|
|
51243
51277
|
|
|
51244
51278
|
// src/mcp/handlers/doc.ts
|
|
51245
|
-
import { existsSync as
|
|
51246
|
-
import { mkdir as mkdir7, readFile as
|
|
51247
|
-
import { join as
|
|
51279
|
+
import { existsSync as existsSync10 } from "node:fs";
|
|
51280
|
+
import { mkdir as mkdir7, readFile as readFile5, readdir as readdir5, writeFile as writeFile4 } from "node:fs/promises";
|
|
51281
|
+
import { join as join14 } from "node:path";
|
|
51248
51282
|
|
|
51249
51283
|
// src/utils/markdown-toc.ts
|
|
51250
51284
|
function extractToc(markdown) {
|
|
@@ -51420,25 +51454,25 @@ var ImportError = class extends Error {
|
|
|
51420
51454
|
init_config();
|
|
51421
51455
|
|
|
51422
51456
|
// src/import/validator.ts
|
|
51423
|
-
import { existsSync as
|
|
51457
|
+
import { existsSync as existsSync6 } from "node:fs";
|
|
51424
51458
|
import { readdir as readdir2 } from "node:fs/promises";
|
|
51425
|
-
import { join as
|
|
51459
|
+
import { join as join9 } from "node:path";
|
|
51426
51460
|
var KNOWNS_DIR = ".knowns";
|
|
51427
51461
|
var TEMPLATES_DIR = "templates";
|
|
51428
51462
|
var DOCS_DIR = "docs";
|
|
51429
51463
|
async function validateKnownsDir(dir) {
|
|
51430
|
-
const knownsPath =
|
|
51431
|
-
if (!
|
|
51464
|
+
const knownsPath = join9(dir, KNOWNS_DIR);
|
|
51465
|
+
if (!existsSync6(knownsPath)) {
|
|
51432
51466
|
return {
|
|
51433
51467
|
valid: false,
|
|
51434
51468
|
error: "Source does not contain .knowns/ directory",
|
|
51435
51469
|
hint: "Only Knowns-enabled projects can be imported"
|
|
51436
51470
|
};
|
|
51437
51471
|
}
|
|
51438
|
-
const templatesPath =
|
|
51439
|
-
const docsPath =
|
|
51440
|
-
const hasTemplates =
|
|
51441
|
-
const hasDocs =
|
|
51472
|
+
const templatesPath = join9(knownsPath, TEMPLATES_DIR);
|
|
51473
|
+
const docsPath = join9(knownsPath, DOCS_DIR);
|
|
51474
|
+
const hasTemplates = existsSync6(templatesPath);
|
|
51475
|
+
const hasDocs = existsSync6(docsPath);
|
|
51442
51476
|
if (!hasTemplates && !hasDocs) {
|
|
51443
51477
|
return {
|
|
51444
51478
|
valid: false,
|
|
@@ -51477,14 +51511,14 @@ async function validateKnownsDir(dir) {
|
|
|
51477
51511
|
import { randomBytes } from "node:crypto";
|
|
51478
51512
|
import { rm } from "node:fs/promises";
|
|
51479
51513
|
import { tmpdir } from "node:os";
|
|
51480
|
-
import { join as
|
|
51514
|
+
import { join as join10 } from "node:path";
|
|
51481
51515
|
var ImportProvider = class {
|
|
51482
51516
|
/**
|
|
51483
51517
|
* Generate a unique temp directory path
|
|
51484
51518
|
*/
|
|
51485
51519
|
getTempDir() {
|
|
51486
51520
|
const id = randomBytes(8).toString("hex");
|
|
51487
|
-
return
|
|
51521
|
+
return join10(tmpdir(), `knowns-import-${id}`);
|
|
51488
51522
|
}
|
|
51489
51523
|
/**
|
|
51490
51524
|
* Cleanup temp directory
|
|
@@ -51647,9 +51681,9 @@ var gitProvider = new GitProvider();
|
|
|
51647
51681
|
|
|
51648
51682
|
// src/import/providers/npm.ts
|
|
51649
51683
|
import { spawnSync as spawnSync2 } from "node:child_process";
|
|
51650
|
-
import { existsSync as
|
|
51684
|
+
import { existsSync as existsSync7 } from "node:fs";
|
|
51651
51685
|
import { mkdir as mkdir5, readdir as readdir3, rename as rename2, rm as rm2 } from "node:fs/promises";
|
|
51652
|
-
import { join as
|
|
51686
|
+
import { join as join11 } from "node:path";
|
|
51653
51687
|
function isNpmAvailable() {
|
|
51654
51688
|
try {
|
|
51655
51689
|
const result = spawnSync2("npm", ["--version"], { encoding: "utf-8" });
|
|
@@ -51725,7 +51759,7 @@ var NpmProvider = class extends ImportProvider {
|
|
|
51725
51759
|
if (!tarball) {
|
|
51726
51760
|
throw new Error("No tarball created by npm pack");
|
|
51727
51761
|
}
|
|
51728
|
-
const tarballPath =
|
|
51762
|
+
const tarballPath = join11(tempDir, tarball);
|
|
51729
51763
|
const extractResult = spawnSync2("tar", ["-xzf", tarball], {
|
|
51730
51764
|
cwd: tempDir,
|
|
51731
51765
|
encoding: "utf-8"
|
|
@@ -51733,8 +51767,8 @@ var NpmProvider = class extends ImportProvider {
|
|
|
51733
51767
|
if (extractResult.status !== 0) {
|
|
51734
51768
|
throw new Error(extractResult.stderr || "tar extraction failed");
|
|
51735
51769
|
}
|
|
51736
|
-
const packageDir =
|
|
51737
|
-
if (!
|
|
51770
|
+
const packageDir = join11(tempDir, "package");
|
|
51771
|
+
if (!existsSync7(packageDir)) {
|
|
51738
51772
|
throw new Error("Extracted package directory not found");
|
|
51739
51773
|
}
|
|
51740
51774
|
const extractedDir = this.getTempDir();
|
|
@@ -51764,10 +51798,10 @@ var NpmProvider = class extends ImportProvider {
|
|
|
51764
51798
|
async getMetadata(tempDir, options2) {
|
|
51765
51799
|
const metadata = {};
|
|
51766
51800
|
try {
|
|
51767
|
-
const packageJsonPath =
|
|
51768
|
-
if (
|
|
51769
|
-
const { readFile:
|
|
51770
|
-
const content = await
|
|
51801
|
+
const packageJsonPath = join11(tempDir, "package.json");
|
|
51802
|
+
if (existsSync7(packageJsonPath)) {
|
|
51803
|
+
const { readFile: readFile11 } = await import("node:fs/promises");
|
|
51804
|
+
const content = await readFile11(packageJsonPath, "utf-8");
|
|
51771
51805
|
const pkg = JSON.parse(content);
|
|
51772
51806
|
if (pkg.version) {
|
|
51773
51807
|
metadata.version = pkg.version;
|
|
@@ -51784,15 +51818,15 @@ var NpmProvider = class extends ImportProvider {
|
|
|
51784
51818
|
var npmProvider = new NpmProvider();
|
|
51785
51819
|
|
|
51786
51820
|
// src/import/providers/local.ts
|
|
51787
|
-
import { existsSync as
|
|
51821
|
+
import { existsSync as existsSync8, statSync } from "node:fs";
|
|
51788
51822
|
import { cp, mkdir as mkdir6, readlink, symlink } from "node:fs/promises";
|
|
51789
51823
|
import { homedir } from "node:os";
|
|
51790
|
-
import { isAbsolute, join as
|
|
51824
|
+
import { isAbsolute, join as join12, resolve } from "node:path";
|
|
51791
51825
|
var KNOWNS_DIR3 = ".knowns";
|
|
51792
51826
|
function resolvePath(source) {
|
|
51793
51827
|
let resolved = source;
|
|
51794
51828
|
if (resolved.startsWith("~")) {
|
|
51795
|
-
resolved =
|
|
51829
|
+
resolved = join12(homedir(), resolved.slice(1));
|
|
51796
51830
|
}
|
|
51797
51831
|
if (!isAbsolute(resolved)) {
|
|
51798
51832
|
resolved = resolve(resolved);
|
|
@@ -51803,7 +51837,7 @@ var LocalProvider = class extends ImportProvider {
|
|
|
51803
51837
|
type = "local";
|
|
51804
51838
|
async validate(source, _options) {
|
|
51805
51839
|
const resolvedPath = resolvePath(source);
|
|
51806
|
-
if (!
|
|
51840
|
+
if (!existsSync8(resolvedPath)) {
|
|
51807
51841
|
return {
|
|
51808
51842
|
valid: false,
|
|
51809
51843
|
error: `Path not found: ${source}`,
|
|
@@ -51820,8 +51854,8 @@ var LocalProvider = class extends ImportProvider {
|
|
|
51820
51854
|
let knownsPath;
|
|
51821
51855
|
if (resolvedPath.endsWith(KNOWNS_DIR3)) {
|
|
51822
51856
|
knownsPath = resolvedPath;
|
|
51823
|
-
} else if (
|
|
51824
|
-
knownsPath =
|
|
51857
|
+
} else if (existsSync8(join12(resolvedPath, KNOWNS_DIR3))) {
|
|
51858
|
+
knownsPath = join12(resolvedPath, KNOWNS_DIR3);
|
|
51825
51859
|
} else {
|
|
51826
51860
|
return {
|
|
51827
51861
|
valid: false,
|
|
@@ -51834,16 +51868,16 @@ var LocalProvider = class extends ImportProvider {
|
|
|
51834
51868
|
}
|
|
51835
51869
|
async fetch(source, options2) {
|
|
51836
51870
|
const resolvedPath = resolvePath(source);
|
|
51837
|
-
if (!
|
|
51871
|
+
if (!existsSync8(resolvedPath)) {
|
|
51838
51872
|
throw new ImportError(`Path not found: ${source}`, "SOURCE_NOT_FOUND" /* SOURCE_NOT_FOUND */);
|
|
51839
51873
|
}
|
|
51840
51874
|
let sourceKnowns;
|
|
51841
51875
|
if (resolvedPath.endsWith(KNOWNS_DIR3)) {
|
|
51842
51876
|
sourceKnowns = resolvedPath;
|
|
51843
51877
|
} else {
|
|
51844
|
-
sourceKnowns =
|
|
51878
|
+
sourceKnowns = join12(resolvedPath, KNOWNS_DIR3);
|
|
51845
51879
|
}
|
|
51846
|
-
if (!
|
|
51880
|
+
if (!existsSync8(sourceKnowns)) {
|
|
51847
51881
|
throw new ImportError(
|
|
51848
51882
|
"Source does not contain .knowns/ directory",
|
|
51849
51883
|
"NO_KNOWNS_DIR" /* NO_KNOWNS_DIR */,
|
|
@@ -51879,7 +51913,7 @@ var LocalProvider = class extends ImportProvider {
|
|
|
51879
51913
|
*/
|
|
51880
51914
|
async createSymlink(source, target) {
|
|
51881
51915
|
const resolvedSource = resolvePath(source);
|
|
51882
|
-
if (
|
|
51916
|
+
if (existsSync8(target)) {
|
|
51883
51917
|
const { rm: rm3 } = await import("node:fs/promises");
|
|
51884
51918
|
await rm3(target, { recursive: true, force: true });
|
|
51885
51919
|
}
|
|
@@ -51906,16 +51940,16 @@ init_config();
|
|
|
51906
51940
|
|
|
51907
51941
|
// src/import/resolver.ts
|
|
51908
51942
|
init_config();
|
|
51909
|
-
import { existsSync as
|
|
51943
|
+
import { existsSync as existsSync9 } from "node:fs";
|
|
51910
51944
|
import { readdir as readdir4 } from "node:fs/promises";
|
|
51911
|
-
import { join as
|
|
51945
|
+
import { join as join13 } from "node:path";
|
|
51912
51946
|
var KNOWNS_DIR4 = ".knowns";
|
|
51913
51947
|
var TEMPLATES_DIR2 = "templates";
|
|
51914
51948
|
var DOCS_DIR2 = "docs";
|
|
51915
51949
|
async function getTemplateDirectories(projectRoot) {
|
|
51916
51950
|
const results = [];
|
|
51917
|
-
const localTemplates =
|
|
51918
|
-
if (
|
|
51951
|
+
const localTemplates = join13(projectRoot, KNOWNS_DIR4, TEMPLATES_DIR2);
|
|
51952
|
+
if (existsSync9(localTemplates)) {
|
|
51919
51953
|
results.push({
|
|
51920
51954
|
path: localTemplates,
|
|
51921
51955
|
source: "local",
|
|
@@ -51923,13 +51957,13 @@ async function getTemplateDirectories(projectRoot) {
|
|
|
51923
51957
|
});
|
|
51924
51958
|
}
|
|
51925
51959
|
const importsDir = getImportsDir(projectRoot);
|
|
51926
|
-
if (
|
|
51960
|
+
if (existsSync9(importsDir)) {
|
|
51927
51961
|
try {
|
|
51928
51962
|
const entries = await readdir4(importsDir, { withFileTypes: true });
|
|
51929
51963
|
for (const entry of entries) {
|
|
51930
51964
|
if (!entry.isDirectory()) continue;
|
|
51931
|
-
const importedTemplates =
|
|
51932
|
-
if (
|
|
51965
|
+
const importedTemplates = join13(importsDir, entry.name, TEMPLATES_DIR2);
|
|
51966
|
+
if (existsSync9(importedTemplates)) {
|
|
51933
51967
|
results.push({
|
|
51934
51968
|
path: importedTemplates,
|
|
51935
51969
|
source: entry.name,
|
|
@@ -51944,8 +51978,8 @@ async function getTemplateDirectories(projectRoot) {
|
|
|
51944
51978
|
}
|
|
51945
51979
|
async function getDocDirectories(projectRoot) {
|
|
51946
51980
|
const results = [];
|
|
51947
|
-
const localDocs =
|
|
51948
|
-
if (
|
|
51981
|
+
const localDocs = join13(projectRoot, KNOWNS_DIR4, DOCS_DIR2);
|
|
51982
|
+
if (existsSync9(localDocs)) {
|
|
51949
51983
|
results.push({
|
|
51950
51984
|
path: localDocs,
|
|
51951
51985
|
source: "local",
|
|
@@ -51953,13 +51987,13 @@ async function getDocDirectories(projectRoot) {
|
|
|
51953
51987
|
});
|
|
51954
51988
|
}
|
|
51955
51989
|
const importsDir = getImportsDir(projectRoot);
|
|
51956
|
-
if (
|
|
51990
|
+
if (existsSync9(importsDir)) {
|
|
51957
51991
|
try {
|
|
51958
51992
|
const entries = await readdir4(importsDir, { withFileTypes: true });
|
|
51959
51993
|
for (const entry of entries) {
|
|
51960
51994
|
if (!entry.isDirectory()) continue;
|
|
51961
|
-
const importedDocs =
|
|
51962
|
-
if (
|
|
51995
|
+
const importedDocs = join13(importsDir, entry.name, DOCS_DIR2);
|
|
51996
|
+
if (existsSync9(importedDocs)) {
|
|
51963
51997
|
results.push({
|
|
51964
51998
|
path: importedDocs,
|
|
51965
51999
|
source: entry.name,
|
|
@@ -51979,8 +52013,8 @@ async function parseImportPath(projectRoot, refPath) {
|
|
|
51979
52013
|
}
|
|
51980
52014
|
const potentialImport = parts[0];
|
|
51981
52015
|
const importsDir = getImportsDir(projectRoot);
|
|
51982
|
-
const importPath =
|
|
51983
|
-
if (
|
|
52016
|
+
const importPath = join13(importsDir, potentialImport);
|
|
52017
|
+
if (existsSync9(importPath)) {
|
|
51984
52018
|
return {
|
|
51985
52019
|
importName: potentialImport,
|
|
51986
52020
|
subPath: parts.slice(1).join("/")
|
|
@@ -51988,6 +52022,33 @@ async function parseImportPath(projectRoot, refPath) {
|
|
|
51988
52022
|
}
|
|
51989
52023
|
return { importName: null, subPath: refPath };
|
|
51990
52024
|
}
|
|
52025
|
+
async function resolveTemplate(projectRoot, templateName) {
|
|
52026
|
+
const { importName, subPath } = await parseImportPath(projectRoot, templateName);
|
|
52027
|
+
if (importName) {
|
|
52028
|
+
const importedTemplates = join13(getImportsDir(projectRoot), importName, TEMPLATES_DIR2);
|
|
52029
|
+
const templatePath = join13(importedTemplates, subPath);
|
|
52030
|
+
if (existsSync9(templatePath)) {
|
|
52031
|
+
return {
|
|
52032
|
+
path: templatePath,
|
|
52033
|
+
source: importName,
|
|
52034
|
+
isImported: true
|
|
52035
|
+
};
|
|
52036
|
+
}
|
|
52037
|
+
return null;
|
|
52038
|
+
}
|
|
52039
|
+
const directories = await getTemplateDirectories(projectRoot);
|
|
52040
|
+
for (const dir of directories) {
|
|
52041
|
+
const templatePath = join13(dir.path, templateName);
|
|
52042
|
+
if (existsSync9(templatePath)) {
|
|
52043
|
+
return {
|
|
52044
|
+
path: templatePath,
|
|
52045
|
+
source: dir.source,
|
|
52046
|
+
isImported: dir.isImported
|
|
52047
|
+
};
|
|
52048
|
+
}
|
|
52049
|
+
}
|
|
52050
|
+
return null;
|
|
52051
|
+
}
|
|
51991
52052
|
async function resolveDoc(projectRoot, docPath) {
|
|
51992
52053
|
return resolveDocWithContext(projectRoot, docPath);
|
|
51993
52054
|
}
|
|
@@ -51995,9 +52056,9 @@ async function resolveDocWithContext(projectRoot, docPath, context) {
|
|
|
51995
52056
|
const { importName, subPath } = await parseImportPath(projectRoot, docPath);
|
|
51996
52057
|
const normalizedSubPath = subPath.endsWith(".md") ? subPath : `${subPath}.md`;
|
|
51997
52058
|
if (importName) {
|
|
51998
|
-
const importedDocs =
|
|
51999
|
-
const fullPath =
|
|
52000
|
-
if (
|
|
52059
|
+
const importedDocs = join13(getImportsDir(projectRoot), importName, DOCS_DIR2);
|
|
52060
|
+
const fullPath = join13(importedDocs, normalizedSubPath);
|
|
52061
|
+
if (existsSync9(fullPath)) {
|
|
52001
52062
|
return {
|
|
52002
52063
|
path: fullPath,
|
|
52003
52064
|
source: importName,
|
|
@@ -52010,8 +52071,8 @@ async function resolveDocWithContext(projectRoot, docPath, context) {
|
|
|
52010
52071
|
if (context) {
|
|
52011
52072
|
const contextDir = directories.find((d) => d.source === context && d.isImported);
|
|
52012
52073
|
if (contextDir) {
|
|
52013
|
-
const fullPath =
|
|
52014
|
-
if (
|
|
52074
|
+
const fullPath = join13(contextDir.path, normalizedSubPath);
|
|
52075
|
+
if (existsSync9(fullPath)) {
|
|
52015
52076
|
return {
|
|
52016
52077
|
path: fullPath,
|
|
52017
52078
|
source: contextDir.source,
|
|
@@ -52023,8 +52084,8 @@ async function resolveDocWithContext(projectRoot, docPath, context) {
|
|
|
52023
52084
|
const importDirs = directories.filter((d) => d.isImported && d.source !== context);
|
|
52024
52085
|
const localDirs = directories.filter((d) => !d.isImported);
|
|
52025
52086
|
for (const dir of importDirs) {
|
|
52026
|
-
const fullPath =
|
|
52027
|
-
if (
|
|
52087
|
+
const fullPath = join13(dir.path, normalizedSubPath);
|
|
52088
|
+
if (existsSync9(fullPath)) {
|
|
52028
52089
|
return {
|
|
52029
52090
|
path: fullPath,
|
|
52030
52091
|
source: dir.source,
|
|
@@ -52033,8 +52094,8 @@ async function resolveDocWithContext(projectRoot, docPath, context) {
|
|
|
52033
52094
|
}
|
|
52034
52095
|
}
|
|
52035
52096
|
for (const dir of localDirs) {
|
|
52036
|
-
const fullPath =
|
|
52037
|
-
if (
|
|
52097
|
+
const fullPath = join13(dir.path, normalizedSubPath);
|
|
52098
|
+
if (existsSync9(fullPath)) {
|
|
52038
52099
|
return {
|
|
52039
52100
|
path: fullPath,
|
|
52040
52101
|
source: dir.source,
|
|
@@ -52061,7 +52122,7 @@ async function listAllTemplates(projectRoot) {
|
|
|
52061
52122
|
ref,
|
|
52062
52123
|
source: dir.source,
|
|
52063
52124
|
sourceUrl: dir.isImported ? sourceUrlMap.get(dir.source) : void 0,
|
|
52064
|
-
path:
|
|
52125
|
+
path: join13(dir.path, entry.name),
|
|
52065
52126
|
isImported: dir.isImported
|
|
52066
52127
|
});
|
|
52067
52128
|
}
|
|
@@ -52076,7 +52137,7 @@ async function listAllDocs(projectRoot) {
|
|
|
52076
52137
|
const sourceUrlMap = new Map(importConfigs.map((c) => [c.name, c.source]));
|
|
52077
52138
|
const results = [];
|
|
52078
52139
|
async function scanDir(baseDir, relativePath, source, isImported) {
|
|
52079
|
-
const currentDir =
|
|
52140
|
+
const currentDir = join13(baseDir, relativePath);
|
|
52080
52141
|
try {
|
|
52081
52142
|
const entries = await readdir4(currentDir, { withFileTypes: true });
|
|
52082
52143
|
for (const entry of entries) {
|
|
@@ -52092,7 +52153,7 @@ async function listAllDocs(projectRoot) {
|
|
|
52092
52153
|
ref,
|
|
52093
52154
|
source,
|
|
52094
52155
|
sourceUrl: isImported ? sourceUrlMap.get(source) : void 0,
|
|
52095
|
-
fullPath:
|
|
52156
|
+
fullPath: join13(currentDir, entry.name),
|
|
52096
52157
|
isImported
|
|
52097
52158
|
});
|
|
52098
52159
|
}
|
|
@@ -52132,8 +52193,8 @@ async function validateRefs(projectRoot, content, tasksDir) {
|
|
|
52132
52193
|
seen.add(refKey);
|
|
52133
52194
|
let exists = false;
|
|
52134
52195
|
if (tasksDir) {
|
|
52135
|
-
const taskPath =
|
|
52136
|
-
exists =
|
|
52196
|
+
const taskPath = join13(tasksDir, `task-${taskId}.md`);
|
|
52197
|
+
exists = existsSync9(taskPath);
|
|
52137
52198
|
}
|
|
52138
52199
|
results.push({
|
|
52139
52200
|
ref: `@task-${taskId}`,
|
|
@@ -52146,7 +52207,7 @@ async function validateRefs(projectRoot, content, tasksDir) {
|
|
|
52146
52207
|
}
|
|
52147
52208
|
|
|
52148
52209
|
// src/mcp/handlers/doc.ts
|
|
52149
|
-
var DOCS_DIR3 =
|
|
52210
|
+
var DOCS_DIR3 = join14(process.cwd(), ".knowns", "docs");
|
|
52150
52211
|
var listDocsSchema = external_exports3.object({
|
|
52151
52212
|
tag: external_exports3.string().optional()
|
|
52152
52213
|
});
|
|
@@ -52296,7 +52357,7 @@ var docTools = [
|
|
|
52296
52357
|
}
|
|
52297
52358
|
];
|
|
52298
52359
|
async function ensureDocsDir() {
|
|
52299
|
-
if (!
|
|
52360
|
+
if (!existsSync10(DOCS_DIR3)) {
|
|
52300
52361
|
await mkdir7(DOCS_DIR3, { recursive: true });
|
|
52301
52362
|
}
|
|
52302
52363
|
}
|
|
@@ -52305,13 +52366,13 @@ function titleToFilename(title) {
|
|
|
52305
52366
|
}
|
|
52306
52367
|
async function getAllMdFiles(dir, basePath = "") {
|
|
52307
52368
|
const files = [];
|
|
52308
|
-
if (!
|
|
52369
|
+
if (!existsSync10(dir)) {
|
|
52309
52370
|
return files;
|
|
52310
52371
|
}
|
|
52311
52372
|
const entries = await readdir5(dir, { withFileTypes: true });
|
|
52312
52373
|
for (const entry of entries) {
|
|
52313
|
-
const fullPath =
|
|
52314
|
-
const relativePath = normalizePath(basePath ?
|
|
52374
|
+
const fullPath = join14(dir, entry.name);
|
|
52375
|
+
const relativePath = normalizePath(basePath ? join14(basePath, entry.name) : entry.name);
|
|
52315
52376
|
if (entry.isDirectory()) {
|
|
52316
52377
|
const subFiles = await getAllMdFiles(fullPath, relativePath);
|
|
52317
52378
|
files.push(...subFiles);
|
|
@@ -52324,13 +52385,13 @@ async function getAllMdFiles(dir, basePath = "") {
|
|
|
52324
52385
|
async function resolveDocPath(name) {
|
|
52325
52386
|
await ensureDocsDir();
|
|
52326
52387
|
let filename = name.endsWith(".md") ? name : `${name}.md`;
|
|
52327
|
-
let filepath =
|
|
52328
|
-
if (
|
|
52388
|
+
let filepath = join14(DOCS_DIR3, filename);
|
|
52389
|
+
if (existsSync10(filepath)) {
|
|
52329
52390
|
return { filepath, filename };
|
|
52330
52391
|
}
|
|
52331
52392
|
filename = `${titleToFilename(name)}.md`;
|
|
52332
|
-
filepath =
|
|
52333
|
-
if (
|
|
52393
|
+
filepath = join14(DOCS_DIR3, filename);
|
|
52394
|
+
if (existsSync10(filepath)) {
|
|
52334
52395
|
return { filepath, filename };
|
|
52335
52396
|
}
|
|
52336
52397
|
const allFiles = await getAllMdFiles(DOCS_DIR3);
|
|
@@ -52342,7 +52403,7 @@ async function resolveDocPath(name) {
|
|
|
52342
52403
|
});
|
|
52343
52404
|
if (matchingFile) {
|
|
52344
52405
|
return {
|
|
52345
|
-
filepath:
|
|
52406
|
+
filepath: join14(DOCS_DIR3, matchingFile),
|
|
52346
52407
|
filename: matchingFile
|
|
52347
52408
|
};
|
|
52348
52409
|
}
|
|
@@ -52363,7 +52424,7 @@ async function handleListDocs(args) {
|
|
|
52363
52424
|
const docs = [];
|
|
52364
52425
|
for (const doc of allDocs) {
|
|
52365
52426
|
try {
|
|
52366
|
-
const fileContent = await
|
|
52427
|
+
const fileContent = await readFile5(doc.fullPath, "utf-8");
|
|
52367
52428
|
const { data, content } = (0, import_gray_matter3.default)(fileContent);
|
|
52368
52429
|
const metadata = data;
|
|
52369
52430
|
const stats = calculateDocStats(content);
|
|
@@ -52396,7 +52457,7 @@ async function handleGetDoc(args) {
|
|
|
52396
52457
|
if (!resolved) {
|
|
52397
52458
|
return errorResponse(`Documentation not found: ${input.path}`);
|
|
52398
52459
|
}
|
|
52399
|
-
const fileContent = await
|
|
52460
|
+
const fileContent = await readFile5(resolved.filepath, "utf-8");
|
|
52400
52461
|
const { data, content } = (0, import_gray_matter3.default)(fileContent);
|
|
52401
52462
|
const metadata = data;
|
|
52402
52463
|
if (input.smart) {
|
|
@@ -52492,7 +52553,7 @@ async function handleGetDoc(args) {
|
|
|
52492
52553
|
});
|
|
52493
52554
|
}
|
|
52494
52555
|
const projectRoot = process.cwd();
|
|
52495
|
-
const tasksDir =
|
|
52556
|
+
const tasksDir = join14(projectRoot, ".knowns", "tasks");
|
|
52496
52557
|
const refs = await validateRefs(projectRoot, content, tasksDir);
|
|
52497
52558
|
const brokenRefs = refs.filter((r) => !r.exists).map((r) => r.ref);
|
|
52498
52559
|
return successResponse({
|
|
@@ -52516,14 +52577,14 @@ async function handleCreateDoc(args) {
|
|
|
52516
52577
|
let relativePath = filename;
|
|
52517
52578
|
if (input.folder) {
|
|
52518
52579
|
const folderPath = input.folder.replace(/^\/|\/$/g, "");
|
|
52519
|
-
targetDir =
|
|
52520
|
-
relativePath =
|
|
52521
|
-
if (!
|
|
52580
|
+
targetDir = join14(DOCS_DIR3, folderPath);
|
|
52581
|
+
relativePath = join14(folderPath, filename);
|
|
52582
|
+
if (!existsSync10(targetDir)) {
|
|
52522
52583
|
await mkdir7(targetDir, { recursive: true });
|
|
52523
52584
|
}
|
|
52524
52585
|
}
|
|
52525
|
-
const filepath =
|
|
52526
|
-
if (
|
|
52586
|
+
const filepath = join14(targetDir, filename);
|
|
52587
|
+
if (existsSync10(filepath)) {
|
|
52527
52588
|
return errorResponse(`Document already exists: ${relativePath}`);
|
|
52528
52589
|
}
|
|
52529
52590
|
const now = (/* @__PURE__ */ new Date()).toISOString();
|
|
@@ -52540,7 +52601,7 @@ async function handleCreateDoc(args) {
|
|
|
52540
52601
|
}
|
|
52541
52602
|
const initialContent = input.content || "# Content\n\nWrite your documentation here.";
|
|
52542
52603
|
const fileContent = import_gray_matter3.default.stringify(initialContent, metadata);
|
|
52543
|
-
await
|
|
52604
|
+
await writeFile4(filepath, fileContent, "utf-8");
|
|
52544
52605
|
await notifyDocUpdate(relativePath);
|
|
52545
52606
|
return successResponse({
|
|
52546
52607
|
message: `Created documentation: ${relativePath}`,
|
|
@@ -52558,7 +52619,7 @@ async function handleUpdateDoc(args) {
|
|
|
52558
52619
|
if (!resolved) {
|
|
52559
52620
|
return errorResponse(`Documentation not found: ${input.path}`);
|
|
52560
52621
|
}
|
|
52561
|
-
const fileContent = await
|
|
52622
|
+
const fileContent = await readFile5(resolved.filepath, "utf-8");
|
|
52562
52623
|
const { data, content } = (0, import_gray_matter3.default)(fileContent);
|
|
52563
52624
|
const metadata = data;
|
|
52564
52625
|
if (input.title) metadata.title = input.title;
|
|
@@ -52586,7 +52647,7 @@ async function handleUpdateDoc(args) {
|
|
|
52586
52647
|
${input.appendContent}`;
|
|
52587
52648
|
}
|
|
52588
52649
|
const newFileContent = import_gray_matter3.default.stringify(updatedContent, metadata);
|
|
52589
|
-
await
|
|
52650
|
+
await writeFile4(resolved.filepath, newFileContent, "utf-8");
|
|
52590
52651
|
await notifyDocUpdate(resolved.filename);
|
|
52591
52652
|
return successResponse({
|
|
52592
52653
|
message: sectionUpdated ? `Updated section "${sectionUpdated}" in ${resolved.filename}` : `Updated documentation: ${resolved.filename}`,
|
|
@@ -52607,7 +52668,7 @@ async function handleSearchDocs(args) {
|
|
|
52607
52668
|
const query = input.query.toLowerCase();
|
|
52608
52669
|
const results = [];
|
|
52609
52670
|
for (const file3 of mdFiles) {
|
|
52610
|
-
const fileContent = await
|
|
52671
|
+
const fileContent = await readFile5(join14(DOCS_DIR3, file3), "utf-8");
|
|
52611
52672
|
const { data, content } = (0, import_gray_matter3.default)(fileContent);
|
|
52612
52673
|
const metadata = data;
|
|
52613
52674
|
if (input.tag && !metadata.tags?.includes(input.tag)) {
|
|
@@ -52644,7 +52705,7 @@ async function handleSearchDocs(args) {
|
|
|
52644
52705
|
}
|
|
52645
52706
|
|
|
52646
52707
|
// src/codegen/renderer.ts
|
|
52647
|
-
import { readFile as
|
|
52708
|
+
import { readFile as readFile6 } from "node:fs/promises";
|
|
52648
52709
|
var import_handlebars = __toESM(require_lib(), 1);
|
|
52649
52710
|
|
|
52650
52711
|
// node_modules/change-case/dist/index.js
|
|
@@ -52841,7 +52902,7 @@ function renderString(template, context) {
|
|
|
52841
52902
|
return compiled(context);
|
|
52842
52903
|
}
|
|
52843
52904
|
async function renderFile(templatePath, context) {
|
|
52844
|
-
const content = await
|
|
52905
|
+
const content = await readFile6(templatePath, "utf-8");
|
|
52845
52906
|
return renderString(content, context);
|
|
52846
52907
|
}
|
|
52847
52908
|
function renderPath(pathPattern, context) {
|
|
@@ -52869,31 +52930,31 @@ function evaluateCondition(condition, context) {
|
|
|
52869
52930
|
}
|
|
52870
52931
|
|
|
52871
52932
|
// src/instructions/guidelines/unified/commands-reference.md
|
|
52872
|
-
var commands_reference_default = '{{#if mcp}}\n# MCP Tools Reference\n\n## Task Tools\n\n### mcp__knowns__create_task\n\n```json\n{\n "title": "Task title",\n "description": "Task description",\n "status": "todo",\n "priority": "medium",\n "labels": ["label1"],\n "assignee": "@me",\n "parent": "parent-id"\n}\n```\n\n### mcp__knowns__update_task\n\n```json\n{\n "taskId": "<id>",\n "status": "in-progress",\n "assignee": "@me",\n "addAc": ["Criterion 1", "Criterion 2"],\n "checkAc": [1, 2],\n "uncheckAc": [3],\n "removeAc": [4],\n "plan": "1. Step one\\n2. Step two",\n "notes": "Implementation notes",\n "appendNotes": "Additional notes"\n}\n```\n\n| Field | Purpose |\n|-------|---------|\n| `addAc` | Add new acceptance criteria |\n| `checkAc` | Mark AC done (1-based index) |\n| `uncheckAc` | Unmark AC (1-based index) |\n| `removeAc` | Remove AC (1-based index) |\n| `plan` | Set implementation plan |\n| `notes` | Replace implementation notes |\n| `appendNotes` | Append to notes |\n\n### mcp__knowns__get_task\n\n```json\n{ "taskId": "<id>" }\n```\n\n### mcp__knowns__list_tasks\n\n```json\n{ "status": "in-progress", "assignee": "@me" }\n```\n\n### mcp__knowns__search_tasks\n\n```json\n{ "query": "keyword" }\n```\n\n---\n\n## Doc Tools\n\n### mcp__knowns__get_doc\n\n**ALWAYS use `smart: true`** - auto-handles small/large docs:\n\n```json\n{ "path": "readme", "smart": true }\n```\n\nIf large, returns TOC. 
Then read section:\n```json\n{ "path": "readme", "section": "3" }\n```\n\n### mcp__knowns__list_docs\n\n```json\n{ "tag": "api" }\n```\n\n### mcp__knowns__create_doc\n\n```json\n{\n "title": "Doc Title",\n "description": "Description",\n "tags": ["tag1"],\n "folder": "guides",\n "content": "Initial content"\n}\n```\n\n### mcp__knowns__update_doc\n\n```json\n{\n "path": "readme",\n "content": "Replace content",\n "section": "2"\n}\n```\n\n### mcp__knowns__search_docs\n\n```json\n{ "query": "keyword", "tag": "api" }\n```\n\n### mcp__knowns__search (Unified)\n\n```json\n{\n "query": "keyword",\n "type": "all",\n "status": "in-progress",\n "priority": "high",\n "assignee": "@me",\n "label": "feature",\n "tag": "api",\n "limit": 20\n}\n```\n\n| Field | Purpose |\n|-------|---------|\n| `type` | "all", "task", or "doc" |\n| `status/priority/assignee/label` | Task filters |\n| `tag` | Doc filter |\n| `limit` | Max results (default: 20) |\n\n---\n\n## Time Tools\n\n### mcp__knowns__start_time\n\n```json\n{ "taskId": "<id>" }\n```\n\n### mcp__knowns__stop_time\n\n```json\n{ "taskId": "<id>" }\n```\n\n### mcp__knowns__add_time\n\n```json\n{\n "taskId": "<id>",\n "duration": "2h30m",\n "note": "Note",\n "date": "2025-01-15"\n}\n```\n\n### mcp__knowns__get_time_report\n\n```json\n{ "from": "2025-01-01", "to": "2025-01-31", "groupBy": "task" }\n```\n\n---\n\n## Template Tools\n\n### mcp__knowns__list_templates\n\n```json\n{}\n```\n\n### mcp__knowns__get_template\n\n```json\n{ "name": "template-name" }\n```\n\n### mcp__knowns__run_template\n\n```json\n{\n "name": "template-name",\n "variables": { "name": "MyComponent" },\n "dryRun": true\n}\n```\n\n### mcp__knowns__create_template\n\n```json\n{\n "name": "my-template",\n "description": "Description",\n "doc": "patterns/my-pattern"\n}\n```\n\n---\n\n## Other\n\n### mcp__knowns__get_board\n\n```json\n{}\n```\n\n{{
|
|
52933
|
+
var commands_reference_default = '{{#if mcp}}\n# MCP Tools Reference\n\n## Task Tools\n\n### mcp__knowns__create_task\n\n```json\n{\n "title": "Task title",\n "description": "Task description",\n "status": "todo",\n "priority": "medium",\n "labels": ["label1"],\n "assignee": "@me",\n "parent": "parent-id"\n}\n```\n\n### mcp__knowns__update_task\n\n```json\n{\n "taskId": "<id>",\n "status": "in-progress",\n "assignee": "@me",\n "addAc": ["Criterion 1", "Criterion 2"],\n "checkAc": [1, 2],\n "uncheckAc": [3],\n "removeAc": [4],\n "plan": "1. Step one\\n2. Step two",\n "notes": "Implementation notes",\n "appendNotes": "Additional notes"\n}\n```\n\n| Field | Purpose |\n|-------|---------|\n| `addAc` | Add new acceptance criteria |\n| `checkAc` | Mark AC done (1-based index) |\n| `uncheckAc` | Unmark AC (1-based index) |\n| `removeAc` | Remove AC (1-based index) |\n| `plan` | Set implementation plan |\n| `notes` | Replace implementation notes |\n| `appendNotes` | Append to notes |\n\n### mcp__knowns__get_task\n\n```json\n{ "taskId": "<id>" }\n```\n\n### mcp__knowns__list_tasks\n\n```json\n{ "status": "in-progress", "assignee": "@me" }\n```\n\n### mcp__knowns__search_tasks\n\n```json\n{ "query": "keyword" }\n```\n\n---\n\n## Doc Tools\n\n### mcp__knowns__get_doc\n\n**ALWAYS use `smart: true`** - auto-handles small/large docs:\n\n```json\n{ "path": "readme", "smart": true }\n```\n\nIf large, returns TOC. 
Then read section:\n```json\n{ "path": "readme", "section": "3" }\n```\n\n### mcp__knowns__list_docs\n\n```json\n{ "tag": "api" }\n```\n\n### mcp__knowns__create_doc\n\n```json\n{\n "title": "Doc Title",\n "description": "Description",\n "tags": ["tag1"],\n "folder": "guides",\n "content": "Initial content"\n}\n```\n\n### mcp__knowns__update_doc\n\n```json\n{\n "path": "readme",\n "content": "Replace content",\n "section": "2"\n}\n```\n\n### mcp__knowns__search_docs\n\n```json\n{ "query": "keyword", "tag": "api" }\n```\n\n### mcp__knowns__search (Unified)\n\n```json\n{\n "query": "keyword",\n "type": "all",\n "status": "in-progress",\n "priority": "high",\n "assignee": "@me",\n "label": "feature",\n "tag": "api",\n "limit": 20\n}\n```\n\n| Field | Purpose |\n|-------|---------|\n| `type` | "all", "task", or "doc" |\n| `status/priority/assignee/label` | Task filters |\n| `tag` | Doc filter |\n| `limit` | Max results (default: 20) |\n\n---\n\n## Time Tools\n\n### mcp__knowns__start_time\n\n```json\n{ "taskId": "<id>" }\n```\n\n### mcp__knowns__stop_time\n\n```json\n{ "taskId": "<id>" }\n```\n\n### mcp__knowns__add_time\n\n```json\n{\n "taskId": "<id>",\n "duration": "2h30m",\n "note": "Note",\n "date": "2025-01-15"\n}\n```\n\n### mcp__knowns__get_time_report\n\n```json\n{ "from": "2025-01-01", "to": "2025-01-31", "groupBy": "task" }\n```\n\n---\n\n## Template Tools\n\n### mcp__knowns__list_templates\n\n```json\n{}\n```\n\n### mcp__knowns__get_template\n\n```json\n{ "name": "template-name" }\n```\n\n### mcp__knowns__run_template\n\n```json\n{\n "name": "template-name",\n "variables": { "name": "MyComponent" },\n "dryRun": true\n}\n```\n\n### mcp__knowns__create_template\n\n```json\n{\n "name": "my-template",\n "description": "Description",\n "doc": "patterns/my-pattern"\n}\n```\n\n---\n\n## Other\n\n### mcp__knowns__get_board\n\n```json\n{}\n```\n\n{{/if}}\n{{#if cli}}\n# CLI Commands Reference\n\n## task create\n\n```bash\nknowns task create <title> 
[options]\n```\n\n| Flag | Short | Purpose |\n|------|-------|---------|\n| `--description` | `-d` | Task description |\n| `--ac` | | Acceptance criterion (repeatable) |\n| `--labels` | `-l` | Comma-separated labels |\n| `--assignee` | `-a` | Assign to user |\n| `--priority` | | low/medium/high |\n| `--parent` | | Parent task ID (raw ID only!) |\n\n**`-a` = assignee, NOT acceptance criteria! Use `--ac` for AC.**\n\n---\n\n## task edit\n\n```bash\nknowns task edit <id> [options]\n```\n\n| Flag | Short | Purpose |\n|------|-------|---------|\n| `--status` | `-s` | Change status |\n| `--assignee` | `-a` | Assign user |\n| `--ac` | | Add acceptance criterion |\n| `--check-ac` | | Mark AC done (1-indexed) |\n| `--uncheck-ac` | | Unmark AC |\n| `--plan` | | Set implementation plan |\n| `--notes` | | Replace notes |\n| `--append-notes` | | Add to notes |\n\n---\n\n## task view/list\n\n```bash\nknowns task <id> --plain\nknowns task list --plain\nknowns task list --status in-progress --plain\nknowns task list --tree --plain\n```\n\n---\n\n## doc create\n\n```bash\nknowns doc create <title> [options]\n```\n\n| Flag | Short | Purpose |\n|------|-------|---------|\n| `--description` | `-d` | Description |\n| `--tags` | `-t` | Comma-separated tags |\n| `--folder` | `-f` | Folder path |\n\n---\n\n## doc edit\n\n```bash\nknowns doc edit <name> [options]\n```\n\n| Flag | Short | Purpose |\n|------|-------|---------|\n| `--content` | `-c` | Replace content |\n| `--append` | `-a` | Append content |\n| `--section` | | Target section (use with -c) |\n\n**In doc edit, `-a` = append content, NOT assignee!**\n\n---\n\n## doc view/list\n\n**ALWAYS use `--smart`** - auto-handles small/large docs:\n\n```bash\nknowns doc <path> --plain --smart\n```\n\nIf large, returns TOC. 
Then read section:\n```bash\nknowns doc <path> --plain --section 3\n```\n\n```bash\nknowns doc list --plain\nknowns doc list --tag api --plain\n```\n\n---\n\n## time\n\n```bash\nknowns time start <id> # REQUIRED when taking task\nknowns time stop # REQUIRED when completing\nknowns time status\nknowns time add <id> <duration> -n "Note"\n```\n\n---\n\n## search\n\n```bash\nknowns search "query" --plain\nknowns search "auth" --type task --plain\nknowns search "api" --type doc --plain\n```\n\n---\n\n## template\n\n```bash\nknowns template list\nknowns template info <name>\nknowns template run <name> --name "X" --dry-run\nknowns template create <name>\n```\n\n---\n\n## Multi-line Input\n\n```bash\nknowns task edit <id> --plan $\'1. Step\\n2. Step\\n3. Step\'\n```\n{{/if}}\n';
|
|
52873
52934
|
|
|
52874
52935
|
// src/instructions/guidelines/unified/common-mistakes.md
|
|
52875
|
-
var common_mistakes_default = '# Common Mistakes\n\n{{#
|
|
52936
|
+
var common_mistakes_default = '# Common Mistakes\n\n{{#if cli}}\n## CRITICAL: The -a Flag\n\n| Command | `-a` Means | NOT This! |\n|---------|------------|-----------|\n| `task create/edit` | `--assignee` | ~~acceptance criteria~~ |\n| `doc edit` | `--append` | ~~assignee~~ |\n\n```bash\n# WRONG (sets assignee to garbage!)\nknowns task edit 35 -a "Criterion text"\n\n# CORRECT (use --ac)\nknowns task edit 35 --ac "Criterion text"\n```\n\n---\n{{/if}}\n\n## CRITICAL: Notes vs Append Notes\n\n**NEVER use `notes`/`--notes` for progress updates - it REPLACES all existing notes!**\n\n{{#if cli}}\n```bash\n# \u274C WRONG - Destroys audit trail!\nknowns task edit <id> --notes "Done: feature X"\n\n# \u2705 CORRECT - Preserves history\nknowns task edit <id> --append-notes "Done: feature X"\n```\n{{/if}}\n{{#if mcp}}\n```json\n// \u274C WRONG - Destroys audit trail!\nmcp__knowns__update_task({\n "taskId": "<id>",\n "notes": "Done: feature X"\n})\n\n// \u2705 CORRECT - Preserves history\nmcp__knowns__update_task({\n "taskId": "<id>",\n "appendNotes": "Done: feature X"\n})\n```\n{{/if}}\n\n| Field | Behavior |\n|-------|----------|\n{{#if cli}}\n| `--notes` | **REPLACES** all notes (use only for initial setup) |\n| `--append-notes` | **APPENDS** to existing notes (use for progress) |\n{{/if}}\n{{#if mcp}}\n| `notes` | **REPLACES** all notes (use only for initial setup) |\n| `appendNotes` | **APPENDS** to existing notes (use for progress) |\n{{/if}}\n\n---\n\n## Quick Reference\n\n| DON\'T | DO |\n|-------|-----|\n{{#if cli}}\n| Edit .md files directly | Use CLI commands |\n| `-a "criterion"` | `--ac "criterion"` |\n| `--parent task-48` | `--parent 48` (raw ID) |\n| `--plain` with create/edit | `--plain` only for view/list |\n| `--notes` for progress | `--append-notes` for progress |\n{{/if}}\n{{#if mcp}}\n| Edit .md files directly | Use MCP tools |\n| `notes` for progress | `appendNotes` for progress |\n{{/if}}\n| Check AC before work done | Check AC AFTER work done |\n| Code 
before plan approval | Wait for user approval |\n| Code before reading docs | Read docs FIRST |\n| Skip time tracking | Always start/stop timer |\n| Ignore refs | Follow ALL `@task-xxx`, `@doc/xxx`, `@template/xxx` refs |\n\n{{#if mcp}}\n---\n\n## MCP Task Operations\n\nAll task operations are available via MCP:\n\n| Operation | MCP Field |\n|-----------|-----------|\n| Add acceptance criteria | `addAc: ["criterion"]` |\n| Check AC | `checkAc: [1, 2]` (1-based) |\n| Uncheck AC | `uncheckAc: [1]` (1-based) |\n| Remove AC | `removeAc: [1]` (1-based) |\n| Set plan | `plan: "..."` |\n| Set notes | `notes: "..."` |\n| Append notes | `appendNotes: "..."` |\n| Change status | `status: "in-progress"` |\n| Assign | `assignee: "@me"` |\n{{/if}}\n\n---\n\n## Template Syntax Pitfalls\n\nWhen writing `.hbs` templates, **NEVER** create `$` followed by triple-brace - Handlebars interprets triple-brace as unescaped output:\n\n```\n// \u274C WRONG - Parse error!\nthis.logger.log(`Created: $` + `{` + `{` + `{camelCase entity}.id}`);\n\n// \u2705 CORRECT - Add space between ${ and double-brace, use ~ to trim whitespace\nthis.logger.log(`Created: ${ \\{{~camelCase entity~}}.id}`);\n```\n\n| DON\'T | DO |\n|-------|-----|\n| `$` + triple-brace | `${ \\{{~helper~}}}` (space + escaped) |\n\n**Rules:**\n- Add space between `${` and double-brace\n- Use `~` (tilde) to trim whitespace in output\n- Escape literal braces with backslash\n\n---\n\n## Error Recovery\n\n| Problem | Solution |\n|---------|----------|\n{{#if cli}}\n| Set assignee to AC text | `knowns task edit <id> -a @me` |\n| Forgot to stop timer | `knowns time add <id> <duration>` |\n| Checked AC too early | `knowns task edit <id> --uncheck-ac N` |\n| Task not found | `knowns task list --plain` |\n| Replaced notes by mistake | Cannot recover - notes are lost. 
Use `--append-notes` next time |\n{{/if}}\n{{#if mcp}}\n| Forgot to stop timer | `mcp__knowns__add_time` with duration |\n| Wrong status | `mcp__knowns__update_task` to fix |\n| Task not found | `mcp__knowns__list_tasks` to find ID |\n| Need to uncheck AC | `mcp__knowns__update_task` with `uncheckAc: [N]` |\n| Checked AC too early | `mcp__knowns__update_task` with `uncheckAc: [N]` |\n| Replaced notes by mistake | Cannot recover - notes are lost. Use `appendNotes` next time |\n{{/if}}\n';
|
|
52876
52937
|
|
|
52877
52938
|
// src/instructions/guidelines/unified/context-optimization.md
|
|
52878
|
-
var context_optimization_default = '# Context Optimization\n\nOptimize your context usage to work more efficiently within token limits.\n\n---\n\n{{#
|
|
52939
|
+
var context_optimization_default = '# Context Optimization\n\nOptimize your context usage to work more efficiently within token limits.\n\n---\n\n{{#if cli}}\n## Output Format\n\n```bash\n# Verbose output\nknowns task 42 --json\n\n# Compact output (always use --plain)\nknowns task 42 --plain\n```\n\n---\n{{/if}}\n\n## Search Before Read\n\n{{#if cli}}\n### CLI\n```bash\n# DON\'T: Read all docs hoping to find info\nknowns doc "doc1" --plain\nknowns doc "doc2" --plain\n\n# DO: Search first, then read only relevant docs\nknowns search "authentication" --type doc --plain\nknowns doc "security-patterns" --plain\n```\n{{/if}}\n{{#if mcp}}\n### MCP\n```json\n// DON\'T: Read all docs hoping to find info\nmcp__knowns__get_doc({ "path": "doc1" })\nmcp__knowns__get_doc({ "path": "doc2" })\n\n// DO: Search first, then read only relevant docs\nmcp__knowns__search_docs({ "query": "authentication" })\nmcp__knowns__get_doc({ "path": "security-patterns" })\n```\n{{/if}}\n\n---\n\n{{#if mcp}}\n## Use Filters\n\n```json\n// DON\'T: List all then filter manually\nmcp__knowns__list_tasks({})\n\n// DO: Use filters in the query\nmcp__knowns__list_tasks({\n "status": "in-progress",\n "assignee": "@me"\n})\n```\n\n---\n{{/if}}\n\n## Reading Documents\n\n{{#if cli}}\n### CLI\n**ALWAYS use `--smart`** - auto-handles both small and large docs:\n\n```bash\n# DON\'T: Read without --smart\nknowns doc readme --plain\n\n# DO: Always use --smart\nknowns doc readme --plain --smart\n# Small doc \u2192 full content\n# Large doc \u2192 stats + TOC\n\n# If large, read specific section:\nknowns doc readme --plain --section 3\n```\n{{/if}}\n{{#if mcp}}\n### MCP\n**ALWAYS use `smart: true`** - auto-handles both small and large docs:\n\n```json\n// DON\'T: Read without smart\nmcp__knowns__get_doc({ "path": "readme" })\n\n// DO: Always use smart\nmcp__knowns__get_doc({ "path": "readme", "smart": true })\n// Small doc \u2192 full content\n// Large doc \u2192 stats + TOC\n\n// If large, read specific 
section:\nmcp__knowns__get_doc({ "path": "readme", "section": "3" })\n```\n{{/if}}\n\n**Behavior:**\n- **\u22642000 tokens**: Returns full content automatically\n- **>2000 tokens**: Returns stats + TOC, then use section parameter\n\n---\n\n## Compact Notes\n\n```bash\n# DON\'T: Verbose notes\nknowns task edit 42 --append-notes "I have successfully completed the implementation..."\n\n# DO: Compact notes\nknowns task edit 42 --append-notes "Done: Auth middleware + JWT validation"\n```\n\n---\n\n## Avoid Redundant Operations\n\n| Don\'t | Do Instead |\n|-------|------------|\n| Re-read files already in context | Reference from memory |\n| List tasks/docs multiple times | List once, remember results |\n| Quote entire file contents | Summarize key points |\n\n---\n\n## Efficient Workflow\n\n| Phase | Context-Efficient Approach |\n|-------|---------------------------|\n| **Research** | Search \u2192 Read only matches |\n| **Planning** | Brief plan, not detailed prose |\n| **Coding** | Read only files being modified |\n| **Notes** | Bullet points, not paragraphs |\n| **Completion** | Summary, not full log |\n\n---\n\n## Quick Rules\n\n{{#if cli}}\n1. **Always `--plain`** - Never use `--json` unless needed\n2. **Always `--smart`** - Auto-handles doc size\n{{/if}}\n{{#if mcp}}\n1. **Always `smart: true`** - Auto-handles doc size\n{{/if}}\n3. **Search first** - Don\'t read all docs hoping to find info\n4. **Read selectively** - Only fetch what you need\n5. **Write concise** - Compact notes, not essays\n6. **Don\'t repeat** - Reference context already loaded\n';
|
|
52879
52940
|
|
|
52880
52941
|
// src/instructions/guidelines/unified/core-rules.md
|
|
52881
|
-
var core_rules_default = '# Core Rules\n\n> These rules are NON-NEGOTIABLE. Violating them leads to data corruption and lost work.\n\n---\n\n## The Golden Rule\n\n{{#if mcp}}\n**If you want to change ANYTHING in a task or doc, use MCP tools. NEVER edit .md files directly.**\n{{else}}\n**If you want to change ANYTHING in a task or doc, use CLI commands. NEVER edit .md files directly.**\n{{/if}}\n\n{{#
|
|
52942
|
+
var core_rules_default = '# Core Rules\n\n> These rules are NON-NEGOTIABLE. Violating them leads to data corruption and lost work.\n\n---\n\n## The Golden Rule\n\n{{#if mcp}}\n{{#if cli}}\n**If you want to change ANYTHING in a task or doc, use MCP tools (preferred) or CLI commands (fallback). NEVER edit .md files directly.**\n{{else}}\n**If you want to change ANYTHING in a task or doc, use MCP tools. NEVER edit .md files directly.**\n{{/if}}\n{{else}}\n{{#if cli}}\n**If you want to change ANYTHING in a task or doc, use CLI commands. NEVER edit .md files directly.**\n{{/if}}\n{{/if}}\n\n{{#if cli}}\n---\n\n## CRITICAL: The -a Flag Confusion\n\nThe `-a` flag means DIFFERENT things in different commands:\n\n| Command | `-a` Means | NOT This! |\n|---------|------------|-----------|\n| `task create` | `--assignee` (assign user) | ~~acceptance criteria~~ |\n| `task edit` | `--assignee` (assign user) | ~~acceptance criteria~~ |\n| `doc edit` | `--append` (append content) | ~~assignee~~ |\n\n### Acceptance Criteria: Use --ac\n\n```bash\n# WRONG: -a is assignee, NOT acceptance criteria!\nknowns task edit 35 -a "- [ ] Criterion" # Sets assignee to garbage!\n\n# CORRECT: Use --ac for acceptance criteria\nknowns task edit 35 --ac "Criterion one"\nknowns task create "Title" --ac "Criterion one" --ac "Criterion two"\n```\n{{/if}}\n\n---\n\n## Quick Reference\n\n| Rule | Description |\n|------|-------------|\n{{#if mcp}}\n{{#if cli}}\n| **MCP Tools (preferred)** | Use MCP tools for ALL operations. Fallback to CLI if needed. NEVER edit .md files directly |\n{{else}}\n| **MCP Tools Only** | Use MCP tools for ALL operations. NEVER edit .md files directly |\n{{/if}}\n{{else}}\n{{#if cli}}\n| **CLI Only** | Use commands for ALL operations. 
NEVER edit .md files directly |\n{{/if}}\n{{/if}}\n| **Docs First** | Read project docs BEFORE planning or coding |\n| **Time Tracking** | Start timer when taking task, stop when done |\n| **Plan Approval** | Share plan with user, WAIT for approval before coding |\n| **Check AC After** | Only mark criteria done AFTER completing work |\n\n{{#if cli}}\n---\n\n## The --plain Flag\n\n**ONLY for view/list/search commands (NOT create/edit):**\n\n```bash\n# CORRECT\nknowns task <id> --plain\nknowns task list --plain\nknowns doc "path" --plain\nknowns search "query" --plain\n\n# WRONG (create/edit don\'t support --plain)\nknowns task create "Title" --plain # ERROR!\nknowns task edit <id> -s done --plain # ERROR!\n```\n{{/if}}\n\n---\n\n## Reference System\n\nTasks, docs, and templates can reference each other:\n\n| Type | Writing (Input) | Reading (Output) |\n|------|-----------------|------------------|\n| Task | `@task-<id>` | `@.knowns/tasks/task-<id>` |\n| Doc | `@doc/<path>` | `@.knowns/docs/<path>.md` |\n| Template | `@template/<name>` | `@.knowns/templates/<name>` |\n\n**Always follow refs recursively** to gather complete context before planning.\n\n---\n\n## Subtasks\n\n{{#if cli}}\n### CLI\n```bash\nknowns task create "Subtask title" --parent 48\n```\n\n**CRITICAL:** Use raw ID for `--parent`:\n```bash\n# CORRECT\nknowns task create "Title" --parent 48\n\n# WRONG\nknowns task create "Title" --parent task-48\n```\n{{/if}}\n{{#if mcp}}\n### MCP\n```json\nmcp__knowns__create_task({\n "title": "Subtask title",\n "parent": "parent-task-id"\n})\n```\n\n**CRITICAL:** Use raw ID (string) for all MCP tool calls.\n{{/if}}\n';
|
|
52882
52943
|
|
|
52883
52944
|
// src/instructions/guidelines/unified/workflow-completion.md
|
|
52884
|
-
var workflow_completion_default = '# Task Completion\n\n## Definition of Done\n\nA task is **Done** when ALL of these are complete:\n\n{{#if
|
|
52945
|
+
var workflow_completion_default = '# Task Completion\n\n## Definition of Done\n\nA task is **Done** when ALL of these are complete:\n\n{{#if cli}}\n### CLI\n| Requirement | Command |\n|-------------|---------|\n| All AC checked | `knowns task edit <id> --check-ac N` |\n| Notes added | `knowns task edit <id> --notes "Summary"` |\n| Timer stopped | `knowns time stop` |\n| Status = done | `knowns task edit <id> -s done` |\n| Tests pass | Run test suite |\n{{/if}}\n{{#if mcp}}\n### MCP\n| Requirement | How |\n|-------------|-----|\n| All AC checked | `mcp__knowns__update_task` with `checkAc` |\n| Notes added | `mcp__knowns__update_task` with `notes` |\n| Timer stopped | `mcp__knowns__stop_time` |\n| Status = done | `mcp__knowns__update_task` with `status: "done"` |\n| Tests pass | Run test suite |\n{{/if}}\n\n---\n\n## Completion Steps\n\n{{#if cli}}\n### CLI\n```bash\n# 1. Verify all AC are checked\nknowns task <id> --plain\n\n# 2. Add implementation notes\nknowns task edit <id> --notes $\'## Summary\nWhat was done and key decisions.\'\n\n# 3. Stop timer (REQUIRED!)\nknowns time stop\n\n# 4. Mark done\nknowns task edit <id> -s done\n```\n{{/if}}\n{{#if mcp}}\n### MCP\n```json\n// 1. Verify all AC are checked\nmcp__knowns__get_task({ "taskId": "<id>" })\n\n// 2. Add implementation notes\nmcp__knowns__update_task({\n "taskId": "<id>",\n "notes": "## Summary\\nWhat was done and key decisions."\n})\n\n// 3. Stop timer (REQUIRED!)\nmcp__knowns__stop_time({ "taskId": "<id>" })\n\n// 4. Mark done\nmcp__knowns__update_task({\n "taskId": "<id>",\n "status": "done"\n})\n```\n{{/if}}\n\n---\n\n## Post-Completion Changes\n\nIf user requests changes after task is done:\n\n{{#if cli}}\n### CLI\n```bash\nknowns task edit <id> -s in-progress # Reopen\nknowns time start <id> # Restart timer\nknowns task edit <id> --ac "Fix: description"\nknowns task edit <id> --append-notes "Reopened: reason"\n```\n{{/if}}\n{{#if mcp}}\n### MCP\n```json\n// 1. 
Reopen task\nmcp__knowns__update_task({\n "taskId": "<id>",\n "status": "in-progress"\n})\n\n// 2. Restart timer\nmcp__knowns__start_time({ "taskId": "<id>" })\n\n// 3. Add AC for the fix\nmcp__knowns__update_task({\n "taskId": "<id>",\n "addAc": ["Fix: description"],\n "appendNotes": "Reopened: reason"\n})\n```\n{{/if}}\n\nThen follow completion steps again.\n\n---\n\n## Checklist\n\n{{#if cli}}\n### CLI\n- [ ] All AC checked (`--check-ac`)\n- [ ] Notes added (`--notes`)\n- [ ] Timer stopped (`time stop`)\n- [ ] Tests pass\n- [ ] Status = done (`-s done`)\n{{/if}}\n{{#if mcp}}\n### MCP\n- [ ] All AC checked (`checkAc`)\n- [ ] Notes added (`notes`)\n- [ ] Timer stopped (`mcp__knowns__stop_time`)\n- [ ] Tests pass\n- [ ] Status = done (`mcp__knowns__update_task`)\n{{/if}}\n';
|
|
52885
52946
|
|
|
52886
52947
|
// src/instructions/guidelines/unified/workflow-creation.md
|
|
52887
|
-
var workflow_creation_default = '# Task Creation\n\n## Before Creating\n\n{{#if
|
|
52948
|
+
var workflow_creation_default = '# Task Creation\n\n## Before Creating\n\n{{#if cli}}\n### CLI\n```bash\n# Search for existing tasks first\nknowns search "keyword" --type task --plain\n```\n{{/if}}\n{{#if mcp}}\n### MCP\n```json\n// Search for existing tasks first\nmcp__knowns__search_tasks({ "query": "keyword" })\n```\n{{/if}}\n\n---\n\n## Create Task\n\n{{#if cli}}\n### CLI\n```bash\nknowns task create "Clear title (WHAT)" \\\n -d "Description (WHY)" \\\n --ac "Outcome 1" \\\n --ac "Outcome 2" \\\n --priority medium \\\n -l "labels"\n```\n{{/if}}\n{{#if mcp}}\n### MCP\n```json\nmcp__knowns__create_task({\n "title": "Clear title (WHAT)",\n "description": "Description (WHY). Related: @doc/security-patterns",\n "priority": "medium",\n "labels": ["feature", "auth"]\n})\n```\n\n**Note:** Add acceptance criteria after creation:\n```bash\nknowns task edit <id> --ac "Outcome 1" --ac "Outcome 2"\n```\n{{/if}}\n\n---\n\n## Quality Guidelines\n\n### Title\n| Bad | Good |\n|-----|------|\n| Do auth stuff | Add JWT authentication |\n| Fix bug | Fix login timeout |\n\n### Description\nExplain WHY. Include doc refs: `@doc/security-patterns`\n\n### Acceptance Criteria\n**Outcome-focused, NOT implementation steps:**\n\n| Bad | Good |\n|-----|------|\n| Add handleLogin() function | User can login |\n| Use bcrypt | Passwords are hashed |\n| Add try-catch | Errors return proper HTTP codes |\n\n---\n\n## Subtasks\n\n{{#if cli}}\n### CLI\n```bash\nknowns task create "Parent task"\nknowns task create "Subtask" --parent 48 # Raw ID only!\n```\n{{/if}}\n{{#if mcp}}\n### MCP\n```json\n// Create parent first\nmcp__knowns__create_task({ "title": "Parent task" })\n\n// Then create subtask with parent ID\nmcp__knowns__create_task({\n "title": "Subtask",\n "parent": "parent-task-id"\n})\n```\n{{/if}}\n\n---\n\n## Anti-Patterns\n\n- Too many AC in one task -> Split into multiple tasks\n- Implementation steps as AC -> Write outcomes instead\n- Skip search -> Always check existing tasks first\n';
|
|
52888
52949
|
|
|
52889
52950
|
// src/instructions/guidelines/unified/workflow-execution.md
|
|
52890
|
-
var workflow_execution_default = '# Task Execution\n\n## Step 1: Take Task\n\n{{#if mcp}}\n```json\n// Update status and assignee\nmcp__knowns__update_task({\n "taskId": "<id>",\n "status": "in-progress",\n "assignee": "@me"\n})\n\n// Start timer (REQUIRED!)\nmcp__knowns__start_time({ "taskId": "<id>" })\n```\n{{
|
|
52951
|
+
var workflow_execution_default = '# Task Execution\n\n## Step 1: Take Task\n\n{{#if cli}}\n### CLI\n```bash\nknowns task edit <id> -s in-progress -a @me\nknowns time start <id> # REQUIRED!\n```\n{{/if}}\n{{#if mcp}}\n### MCP\n```json\n// Update status and assignee\nmcp__knowns__update_task({\n "taskId": "<id>",\n "status": "in-progress",\n "assignee": "@me"\n})\n\n// Start timer (REQUIRED!)\nmcp__knowns__start_time({ "taskId": "<id>" })\n```\n{{/if}}\n\n---\n\n## Step 2: Research\n\n{{#if cli}}\n### CLI\n```bash\n# Read task and follow ALL refs\nknowns task <id> --plain\n# @doc/xxx \u2192 knowns doc "xxx" --plain\n# @task-YY \u2192 knowns task YY --plain\n\n# Search related docs\nknowns search "keyword" --type doc --plain\n\n# Check similar done tasks\nknowns search "keyword" --type task --status done --plain\n```\n{{/if}}\n{{#if mcp}}\n### MCP\n```json\n// Read task and follow ALL refs\nmcp__knowns__get_task({ "taskId": "<id>" })\n\n// @doc/xxx -> read the doc\nmcp__knowns__get_doc({ "path": "xxx", "smart": true })\n\n// @task-YY -> read the task\nmcp__knowns__get_task({ "taskId": "YY" })\n\n// Search related docs\nmcp__knowns__search_docs({ "query": "keyword" })\n```\n{{/if}}\n\n---\n\n## Step 3: Plan (BEFORE coding!)\n\n{{#if cli}}\n### CLI\n```bash\nknowns task edit <id> --plan $\'1. Research (see @doc/xxx)\n2. Implement\n3. Test\n4. Document\'\n```\n{{/if}}\n{{#if mcp}}\n### MCP\n```json\nmcp__knowns__update_task({\n "taskId": "<id>",\n "plan": "1. Research (see @doc/xxx)\\n2. Implement\\n3. Test\\n4. Document"\n})\n```\n{{/if}}\n\n**Share plan with user. 
WAIT for approval before coding.**\n\n---\n\n## Step 4: Implement\n\n{{#if cli}}\n### CLI\n```bash\n# Check AC only AFTER work is done\nknowns task edit <id> --check-ac 1\nknowns task edit <id> --append-notes "Done: feature X"\n```\n{{/if}}\n{{#if mcp}}\n### MCP\n```json\n// Check AC only AFTER work is done\nmcp__knowns__update_task({\n "taskId": "<id>",\n "checkAc": [1],\n "appendNotes": "Done: feature X"\n})\n```\n{{/if}}\n\n---\n\n## Scope Changes\n\nIf new requirements emerge during work:\n\n{{#if cli}}\n### CLI\n```bash\n# Small: Add to current task\nknowns task edit <id> --ac "New requirement"\nknowns task edit <id> --append-notes "Scope updated: reason"\n\n# Large: Ask user first, then create follow-up\nknowns task create "Follow-up: feature" -d "From task <id>"\n```\n{{/if}}\n{{#if mcp}}\n### MCP\n```json\n// Small: Add to current task\nmcp__knowns__update_task({\n "taskId": "<id>",\n "addAc": ["New requirement"],\n "appendNotes": "Scope updated: reason"\n})\n\n// Large: Ask user first, then create follow-up\nmcp__knowns__create_task({\n "title": "Follow-up: feature",\n "description": "From task <id>"\n})\n```\n{{/if}}\n\n**Don\'t silently expand scope. Ask user first.**\n\n---\n\n## Key Rules\n\n1. **Plan before code** - Capture approach first\n2. **Wait for approval** - Don\'t start without OK\n3. **Check AC after work** - Not before\n4. **Ask on scope changes** - Don\'t expand silently\n';
|
|
52891
52952
|
|
|
52892
52953
|
// src/instructions/guidelines/unified/index.ts
|
|
52893
52954
|
function render(template, mode) {
|
|
52894
52955
|
return renderString(template, {
|
|
52895
|
-
mcp: mode === "mcp",
|
|
52896
|
-
cli: mode === "cli"
|
|
52956
|
+
mcp: mode === "mcp" || mode === "unified",
|
|
52957
|
+
cli: mode === "cli" || mode === "unified"
|
|
52897
52958
|
}).trim();
|
|
52898
52959
|
}
|
|
52899
52960
|
function createGuidelines(mode) {
|
|
@@ -52961,6 +53022,7 @@ ${content}
|
|
|
52961
53022
|
}
|
|
52962
53023
|
var CLIGuidelines = createGuidelines("cli");
|
|
52963
53024
|
var MCPGuidelines = createGuidelines("mcp");
|
|
53025
|
+
var UnifiedGuidelines = createGuidelines("unified");
|
|
52964
53026
|
var Guidelines = CLIGuidelines;
|
|
52965
53027
|
|
|
52966
53028
|
// src/instructions/guidelines/index.ts
|
|
@@ -53017,9 +53079,9 @@ async function handleGetGuideline(args) {
|
|
|
53017
53079
|
}
|
|
53018
53080
|
|
|
53019
53081
|
// src/mcp/handlers/template.ts
|
|
53020
|
-
import { existsSync as
|
|
53021
|
-
import { mkdir as mkdir9, writeFile as
|
|
53022
|
-
import { join as
|
|
53082
|
+
import { existsSync as existsSync13 } from "node:fs";
|
|
53083
|
+
import { mkdir as mkdir9, writeFile as writeFile6 } from "node:fs/promises";
|
|
53084
|
+
import { join as join17 } from "node:path";
|
|
53023
53085
|
|
|
53024
53086
|
// src/codegen/schema.ts
|
|
53025
53087
|
var PromptChoiceSchema = external_exports3.object({
|
|
@@ -53099,9 +53161,9 @@ function safeValidateTemplateConfig(data) {
|
|
|
53099
53161
|
|
|
53100
53162
|
// src/codegen/parser.ts
|
|
53101
53163
|
var import_yaml = __toESM(require_dist2(), 1);
|
|
53102
|
-
import { existsSync as
|
|
53103
|
-
import { readFile as
|
|
53104
|
-
import { join as
|
|
53164
|
+
import { existsSync as existsSync11 } from "node:fs";
|
|
53165
|
+
import { readFile as readFile7, readdir as readdir6 } from "node:fs/promises";
|
|
53166
|
+
import { join as join15 } from "node:path";
|
|
53105
53167
|
var CONFIG_FILENAME = "_template.yaml";
|
|
53106
53168
|
var TEMPLATE_EXTENSION = ".hbs";
|
|
53107
53169
|
var TemplateParseError = class extends Error {
|
|
@@ -53113,16 +53175,16 @@ var TemplateParseError = class extends Error {
|
|
|
53113
53175
|
}
|
|
53114
53176
|
};
|
|
53115
53177
|
async function loadTemplate(templateDir) {
|
|
53116
|
-
if (!
|
|
53178
|
+
if (!existsSync11(templateDir)) {
|
|
53117
53179
|
throw new TemplateParseError(`Template directory not found: ${templateDir}`);
|
|
53118
53180
|
}
|
|
53119
|
-
const configPath =
|
|
53120
|
-
if (!
|
|
53181
|
+
const configPath = join15(templateDir, CONFIG_FILENAME);
|
|
53182
|
+
if (!existsSync11(configPath)) {
|
|
53121
53183
|
throw new TemplateParseError(`Template config not found: ${CONFIG_FILENAME}`, void 0, [
|
|
53122
53184
|
`Expected file at: ${configPath}`
|
|
53123
53185
|
]);
|
|
53124
53186
|
}
|
|
53125
|
-
const configContent = await
|
|
53187
|
+
const configContent = await readFile7(configPath, "utf-8");
|
|
53126
53188
|
let rawConfig;
|
|
53127
53189
|
try {
|
|
53128
53190
|
rawConfig = (0, import_yaml.parse)(configContent);
|
|
@@ -53148,9 +53210,9 @@ async function findTemplateFiles(dir, base = "") {
|
|
|
53148
53210
|
const files = [];
|
|
53149
53211
|
const entries = await readdir6(dir, { withFileTypes: true });
|
|
53150
53212
|
for (const entry of entries) {
|
|
53151
|
-
const relativePath = base ?
|
|
53213
|
+
const relativePath = base ? join15(base, entry.name) : entry.name;
|
|
53152
53214
|
if (entry.isDirectory()) {
|
|
53153
|
-
const subFiles = await findTemplateFiles(
|
|
53215
|
+
const subFiles = await findTemplateFiles(join15(dir, entry.name), relativePath);
|
|
53154
53216
|
files.push(...subFiles);
|
|
53155
53217
|
} else if (entry.name.endsWith(TEMPLATE_EXTENSION)) {
|
|
53156
53218
|
files.push(relativePath);
|
|
@@ -53158,22 +53220,18 @@ async function findTemplateFiles(dir, base = "") {
|
|
|
53158
53220
|
}
|
|
53159
53221
|
return files;
|
|
53160
53222
|
}
|
|
53161
|
-
async function loadTemplateByName(templatesDir, templateName) {
|
|
53162
|
-
const templateDir = join14(templatesDir, templateName);
|
|
53163
|
-
return loadTemplate(templateDir);
|
|
53164
|
-
}
|
|
53165
53223
|
async function listTemplates(templatesDir) {
|
|
53166
|
-
if (!
|
|
53224
|
+
if (!existsSync11(templatesDir)) {
|
|
53167
53225
|
return [];
|
|
53168
53226
|
}
|
|
53169
53227
|
const entries = await readdir6(templatesDir, { withFileTypes: true });
|
|
53170
53228
|
const templates = [];
|
|
53171
53229
|
for (const entry of entries) {
|
|
53172
53230
|
if (!entry.isDirectory()) continue;
|
|
53173
|
-
const configPath =
|
|
53174
|
-
if (!
|
|
53231
|
+
const configPath = join15(templatesDir, entry.name, CONFIG_FILENAME);
|
|
53232
|
+
if (!existsSync11(configPath)) continue;
|
|
53175
53233
|
try {
|
|
53176
|
-
const loaded = await loadTemplate(
|
|
53234
|
+
const loaded = await loadTemplate(join15(templatesDir, entry.name));
|
|
53177
53235
|
templates.push(loaded.config);
|
|
53178
53236
|
} catch {
|
|
53179
53237
|
}
|
|
@@ -53182,9 +53240,9 @@ async function listTemplates(templatesDir) {
|
|
|
53182
53240
|
}
|
|
53183
53241
|
|
|
53184
53242
|
// src/codegen/runner.ts
|
|
53185
|
-
import { existsSync as
|
|
53186
|
-
import { appendFile, mkdir as mkdir8, readFile as
|
|
53187
|
-
import { dirname as dirname3, join as
|
|
53243
|
+
import { existsSync as existsSync12 } from "node:fs";
|
|
53244
|
+
import { appendFile, mkdir as mkdir8, readFile as readFile8, writeFile as writeFile5 } from "node:fs/promises";
|
|
53245
|
+
import { dirname as dirname3, join as join16 } from "node:path";
|
|
53188
53246
|
|
|
53189
53247
|
// node_modules/@isaacs/balanced-match/dist/esm/index.js
|
|
53190
53248
|
var balanced = (a, b, str2) => {
|
|
@@ -59957,10 +60015,10 @@ async function executeAction(action, template, context, options2, result) {
|
|
|
59957
60015
|
}
|
|
59958
60016
|
}
|
|
59959
60017
|
async function executeAddAction(action, template, context, options2, result) {
|
|
59960
|
-
const sourcePath =
|
|
60018
|
+
const sourcePath = join16(template.templateDir, action.template);
|
|
59961
60019
|
const destRelative = renderPath(action.path, context);
|
|
59962
|
-
const destPath =
|
|
59963
|
-
if (
|
|
60020
|
+
const destPath = join16(options2.projectRoot, template.config.destination || "", destRelative);
|
|
60021
|
+
if (existsSync12(destPath)) {
|
|
59964
60022
|
if (action.skipIfExists && !options2.force) {
|
|
59965
60023
|
result.skipped.push(destRelative);
|
|
59966
60024
|
return;
|
|
@@ -59973,19 +60031,19 @@ async function executeAddAction(action, template, context, options2, result) {
|
|
|
59973
60031
|
const content = await renderFile(sourcePath, context);
|
|
59974
60032
|
if (!options2.dryRun) {
|
|
59975
60033
|
await ensureDir(dirname3(destPath));
|
|
59976
|
-
await
|
|
60034
|
+
await writeFile5(destPath, content, "utf-8");
|
|
59977
60035
|
}
|
|
59978
60036
|
result.created.push(destRelative);
|
|
59979
60037
|
}
|
|
59980
60038
|
async function executeAddManyAction(action, template, context, options2, result) {
|
|
59981
|
-
const sourceDir =
|
|
60039
|
+
const sourceDir = join16(template.templateDir, action.source);
|
|
59982
60040
|
const pattern = action.globPattern || "**/*.hbs";
|
|
59983
60041
|
const files = await glob(pattern, { cwd: sourceDir, nodir: true });
|
|
59984
60042
|
for (const file3 of files) {
|
|
59985
|
-
const sourcePath =
|
|
59986
|
-
const destRelative = renderPath(
|
|
59987
|
-
const destPath =
|
|
59988
|
-
if (
|
|
60043
|
+
const sourcePath = join16(sourceDir, file3);
|
|
60044
|
+
const destRelative = renderPath(join16(action.destination, file3), context);
|
|
60045
|
+
const destPath = join16(options2.projectRoot, template.config.destination || "", destRelative);
|
|
60046
|
+
if (existsSync12(destPath)) {
|
|
59989
60047
|
if (action.skipIfExists && !options2.force) {
|
|
59990
60048
|
result.skipped.push(destRelative);
|
|
59991
60049
|
continue;
|
|
@@ -59998,17 +60056,17 @@ async function executeAddManyAction(action, template, context, options2, result)
|
|
|
59998
60056
|
const content = await renderFile(sourcePath, context);
|
|
59999
60057
|
if (!options2.dryRun) {
|
|
60000
60058
|
await ensureDir(dirname3(destPath));
|
|
60001
|
-
await
|
|
60059
|
+
await writeFile5(destPath, content, "utf-8");
|
|
60002
60060
|
}
|
|
60003
60061
|
result.created.push(destRelative);
|
|
60004
60062
|
}
|
|
60005
60063
|
}
|
|
60006
60064
|
async function executeModifyAction(action, context, options2, result) {
|
|
60007
|
-
const filePath =
|
|
60008
|
-
if (!
|
|
60065
|
+
const filePath = join16(options2.projectRoot, renderPath(action.path, context));
|
|
60066
|
+
if (!existsSync12(filePath)) {
|
|
60009
60067
|
throw new Error(`Cannot modify: file not found: ${action.path}`);
|
|
60010
60068
|
}
|
|
60011
|
-
const content = await
|
|
60069
|
+
const content = await readFile8(filePath, "utf-8");
|
|
60012
60070
|
const replacement = renderString(action.template, context);
|
|
60013
60071
|
const pattern = new RegExp(action.pattern, "g");
|
|
60014
60072
|
const newContent = content.replace(pattern, replacement);
|
|
@@ -60017,15 +60075,15 @@ async function executeModifyAction(action, context, options2, result) {
|
|
|
60017
60075
|
return;
|
|
60018
60076
|
}
|
|
60019
60077
|
if (!options2.dryRun) {
|
|
60020
|
-
await
|
|
60078
|
+
await writeFile5(filePath, newContent, "utf-8");
|
|
60021
60079
|
}
|
|
60022
60080
|
result.modified.push(action.path);
|
|
60023
60081
|
}
|
|
60024
60082
|
async function executeAppendAction(action, context, options2, result) {
|
|
60025
|
-
const filePath =
|
|
60083
|
+
const filePath = join16(options2.projectRoot, renderPath(action.path, context));
|
|
60026
60084
|
const contentToAppend = renderString(action.template, context);
|
|
60027
|
-
if (action.unique &&
|
|
60028
|
-
const existingContent = await
|
|
60085
|
+
if (action.unique && existsSync12(filePath)) {
|
|
60086
|
+
const existingContent = await readFile8(filePath, "utf-8");
|
|
60029
60087
|
if (existingContent.includes(contentToAppend.trim())) {
|
|
60030
60088
|
result.skipped.push(action.path);
|
|
60031
60089
|
return;
|
|
@@ -60035,20 +60093,20 @@ async function executeAppendAction(action, context, options2, result) {
|
|
|
60035
60093
|
const fullContent = separator + contentToAppend;
|
|
60036
60094
|
if (!options2.dryRun) {
|
|
60037
60095
|
await ensureDir(dirname3(filePath));
|
|
60038
|
-
if (
|
|
60096
|
+
if (existsSync12(filePath)) {
|
|
60039
60097
|
await appendFile(filePath, fullContent, "utf-8");
|
|
60040
60098
|
} else {
|
|
60041
|
-
await
|
|
60099
|
+
await writeFile5(filePath, contentToAppend, "utf-8");
|
|
60042
60100
|
}
|
|
60043
60101
|
}
|
|
60044
|
-
if (
|
|
60102
|
+
if (existsSync12(filePath)) {
|
|
60045
60103
|
result.modified.push(action.path);
|
|
60046
60104
|
} else {
|
|
60047
60105
|
result.created.push(action.path);
|
|
60048
60106
|
}
|
|
60049
60107
|
}
|
|
60050
60108
|
async function ensureDir(dir) {
|
|
60051
|
-
if (!
|
|
60109
|
+
if (!existsSync12(dir)) {
|
|
60052
60110
|
await mkdir8(dir, { recursive: true });
|
|
60053
60111
|
}
|
|
60054
60112
|
}
|
|
@@ -60056,8 +60114,79 @@ async function ensureDir(dir) {
|
|
|
60056
60114
|
// src/codegen/skill-parser.ts
|
|
60057
60115
|
var import_gray_matter4 = __toESM(require_gray_matter(), 1);
|
|
60058
60116
|
|
|
60117
|
+
// src/instructions/skills/knowns.commit/SKILL.md
|
|
60118
|
+
var SKILL_default = '---\nname: knowns.commit\ndescription: Use when committing code changes with proper conventional commit format and verification\n---\n\n# Committing Changes\n\nCreate well-formatted commits following conventional commit standards.\n\n**Announce at start:** "I\'m using the knowns.commit skill to commit changes."\n\n**Core principle:** VERIFY BEFORE COMMITTING - check staged changes, ask for confirmation.\n\n## The Process\n\n### Step 1: Review Staged Changes\n\n```bash\ngit status\ngit diff --staged\n```\n\n### Step 2: Generate Commit Message\n\n**Format:**\n```\n<type>(<scope>): <message>\n\n- Bullet point summarizing change\n- Another point if needed\n```\n\n**Types:**\n\n| Type | Description |\n|------|-------------|\n| `feat` | New feature |\n| `fix` | Bug fix |\n| `docs` | Documentation only |\n| `style` | Formatting, no code change |\n| `refactor` | Code restructure |\n| `perf` | Performance improvement |\n| `test` | Adding tests |\n| `chore` | Maintenance |\n\n**Rules:**\n- Title lowercase, no period, max 50 chars\n- Scope optional but recommended\n- Body explains *why*, not just *what*\n\n### Step 3: Ask for Confirmation\n\nPresent message to user:\n\n```\nReady to commit:\n\nfeat(auth): add JWT token refresh\n\n- Added refresh token endpoint\n- Tokens expire after 1 hour\n\nProceed? 
(yes/no/edit)\n```\n\n**Wait for user approval.**\n\n### Step 4: Commit\n\n```bash\ngit commit -m "feat(auth): add JWT token refresh\n\n- Added refresh token endpoint\n- Tokens expire after 1 hour"\n```\n\n## Guidelines\n\n- Only commit staged files (don\'t `git add` unless asked)\n- NO "Co-Authored-By" lines\n- NO "Generated with Claude Code" ads\n- Ask before committing, never auto-commit\n\n## Examples\n\n**Good:**\n```\nfeat(api): add user profile endpoint\nfix(auth): handle expired token gracefully\ndocs(readme): update installation steps\n```\n\n**Bad:**\n```\nupdate code (too vague)\nWIP (not ready)\nfix bug (which bug?)\n```\n\n## Remember\n\n- Review staged changes first\n- Follow conventional format\n- Ask for confirmation\n- Keep messages concise\n';
|
|
60119
|
+
|
|
60120
|
+
// src/instructions/skills/knowns.doc/SKILL.md
|
|
60121
|
+
var SKILL_default2 = '---\nname: knowns.doc\ndescription: Use when working with Knowns documentation - viewing, searching, creating, or updating docs\n---\n\n# Working with Documentation\n\nNavigate, create, and update Knowns project documentation.\n\n**Announce at start:** "I\'m using the knowns.doc skill to work with documentation."\n\n**Core principle:** SEARCH BEFORE CREATING - avoid duplicates.\n\n## Quick Reference\n\n{{#if mcp}}\n```json\n// List all docs\nmcp__knowns__list_docs({})\n\n// View doc (smart mode)\nmcp__knowns__get_doc({ "path": "<path>", "smart": true })\n\n// Search docs\nmcp__knowns__search_docs({ "query": "<query>" })\n\n// Create doc\nmcp__knowns__create_doc({\n "title": "<title>",\n "description": "<description>",\n "tags": ["tag1", "tag2"],\n "folder": "folder"\n})\n\n// Update doc\nmcp__knowns__update_doc({\n "path": "<path>",\n "content": "content"\n})\n\n// Update section only\nmcp__knowns__update_doc({\n "path": "<path>",\n "section": "2",\n "content": "new section content"\n})\n```\n{{else}}\n```bash\n# List all docs\nknowns doc list --plain\n\n# View doc (auto-handles large docs)\nknowns doc "<path>" --plain\n\n# Search docs\nknowns search "<query>" --type doc --plain\n\n# Create doc\nknowns doc create "<title>" -d "<description>" -t "tags" -f "folder"\n\n# Update doc\nknowns doc edit "<path>" -c "content" # Replace\nknowns doc edit "<path>" -a "content" # Append\nknowns doc edit "<path>" --section "2" -c "content" # Section only\n```\n{{/if}}\n\n## Reading Documents\n\n{{#if mcp}}\n**Use smart mode:**\n```json\nmcp__knowns__get_doc({ "path": "<path>", "smart": true })\n```\n\n- Small doc (\u22642000 tokens) \u2192 full content\n- Large doc \u2192 stats + TOC, then request specific section\n{{else}}\n**View doc:**\n```bash\nknowns doc "<path>" --plain\n```\n\nFor large docs, use sections:\n```bash\nknowns doc "<path>" --toc --plain\nknowns doc "<path>" --section "2" --plain\n```\n{{/if}}\n\n## Creating Documents\n\n### Step 1: 
Search First\n\n{{#if mcp}}\n```json\nmcp__knowns__search_docs({ "query": "<topic>" })\n```\n{{else}}\n```bash\nknowns search "<topic>" --type doc --plain\n```\n{{/if}}\n\n**Don\'t duplicate.** Update existing docs when possible.\n\n### Step 2: Choose Location\n\n| Doc Type | Location | Folder |\n|----------|----------|--------|\n| Core (README, ARCH) | Root | (none) |\n| Guide | `guides/` | `guides` |\n| Pattern | `patterns/` | `patterns` |\n| API doc | `api/` | `api` |\n\n### Step 3: Create\n\n{{#if mcp}}\n```json\nmcp__knowns__create_doc({\n "title": "<title>",\n "description": "<brief description>",\n "tags": ["tag1", "tag2"],\n "folder": "folder"\n})\n```\n{{else}}\n```bash\nknowns doc create "<title>" \\\n -d "<brief description>" \\\n -t "tag1,tag2" \\\n -f "folder" # optional\n```\n{{/if}}\n\n### Step 4: Add Content\n\n{{#if mcp}}\n```json\nmcp__knowns__update_doc({\n "path": "<path>",\n "content": "# Title\\n\\n## 1. Overview\\nWhat this doc covers.\\n\\n## 2. Details\\nMain content."\n})\n```\n{{else}}\n```bash\nknowns doc edit "<path>" -c "$(cat <<\'EOF\'\n# Title\n\n## 1. Overview\nWhat this doc covers.\n\n## 2. Details\nMain content.\n\n## 3. Examples\nPractical examples.\nEOF\n)"\n```\n{{/if}}\n\n## Updating Documents\n\n### View First\n\n{{#if mcp}}\n```json\nmcp__knowns__get_doc({ "path": "<path>", "smart": true })\nmcp__knowns__get_doc({ "path": "<path>", "toc": true })\n```\n{{else}}\n```bash\nknowns doc "<path>" --plain\nknowns doc "<path>" --toc --plain # For large docs\n```\n{{/if}}\n\n### Update Methods\n\n| Method | Use When |\n|--------|----------|\n| Replace all | Rewriting entire doc |\n| Append | Adding to end |\n| Section edit | Updating one section |\n\n**Section edit is most efficient** - less context, safer.\n\n{{#if mcp}}\n```json\n// Update just section 3\nmcp__knowns__update_doc({\n "path": "<path>",\n "section": "3",\n "content": "## 3. 
New Content\\n\\nUpdated section content..."\n})\n```\n{{else}}\n```bash\n# Update just section 3\nknowns doc edit "<path>" --section "3" -c "## 3. New Content\n\nUpdated section content..."\n```\n{{/if}}\n\n## Document Structure\n\nUse numbered headings for section editing to work:\n\n```markdown\n# Title (H1 - only one)\n\n## 1. Overview\n...\n\n## 2. Installation\n...\n\n## 3. Configuration\n...\n```\n\n## Remember\n\n- Search before creating (avoid duplicates)\n- Use smart mode when reading\n- Use section editing for targeted updates\n- Use numbered headings\n- Reference docs with `@doc/<path>`\n';
|
|
60122
|
+
|
|
60123
|
+
// src/instructions/skills/knowns.extract/SKILL.md
|
|
60124
|
+
var SKILL_default3 = '---\nname: knowns.extract\ndescription: Use when extracting reusable patterns, solutions, or knowledge into documentation\n---\n\n# Extracting Knowledge\n\nConvert implementations, patterns, or solutions into reusable project documentation.\n\n**Announce at start:** "I\'m using the knowns.extract skill to extract knowledge."\n\n**Core principle:** ONLY EXTRACT GENERALIZABLE KNOWLEDGE.\n\n## The Process\n\n### Step 1: Identify Source\n\n**From task (if ID provided):**\n{{#if mcp}}\n```json\nmcp__knowns__get_task({ "taskId": "$ARGUMENTS" })\n```\n{{else}}\n```bash\nknowns task $ARGUMENTS --plain\n```\n{{/if}}\n\n**From current context (no arguments):**\n- Recent implementation work\n- Patterns discovered during research\n- Solutions found in conversation\n\nLook for:\n- Implementation patterns used\n- Problems solved\n- Decisions made\n- Lessons learned\n\n### Step 2: Identify Extractable Knowledge\n\n**Good candidates for extraction:**\n- Reusable code patterns\n- Error handling approaches\n- Integration patterns\n- Performance solutions\n- Security practices\n- API design decisions\n\n**NOT good for extraction:**\n- Task-specific details\n- One-time fixes\n- Context-dependent solutions\n\n### Step 3: Search for Existing Docs\n\n{{#if mcp}}\n```json\n// Check if pattern already documented\nmcp__knowns__search_docs({ "query": "<pattern/topic>" })\n\n// List related docs\nmcp__knowns__list_docs({ "tag": "pattern" })\n```\n{{else}}\n```bash\n# Check if pattern already documented\nknowns search "<pattern/topic>" --type doc --plain\n\n# List related docs\nknowns doc list --tag pattern --plain\n```\n{{/if}}\n\n**Don\'t duplicate.** Update existing docs when possible.\n\n### Step 4: Create or Update Documentation\n\n**If new pattern - create doc:**\n\n{{#if mcp}}\n```json\nmcp__knowns__create_doc({\n "title": "Pattern: <Name>",\n "description": "Reusable pattern for <purpose>",\n "tags": ["pattern", "<domain>"],\n "folder": 
"patterns"\n})\n```\n{{else}}\n```bash\nknowns doc create "Pattern: <Name>" \\\n -d "Reusable pattern for <purpose>" \\\n -t "pattern,<domain>" \\\n -f "patterns"\n```\n{{/if}}\n\n**Add content:**\n\n{{#if mcp}}\n```json\nmcp__knowns__update_doc({\n "path": "patterns/<name>",\n "content": "# Pattern: <Name>\\n\\n## 1. Problem\\nWhat problem this pattern solves.\\n\\n## 2. Solution\\nHow to implement the pattern.\\n\\n## 3. Example\\n```typescript\\n// Code example\\n```\\n\\n## 4. When to Use\\n- Situation 1\\n\\n## 5. Source\\nDiscovered in @task-<id>"\n})\n```\n{{else}}\n```bash\nknowns doc edit "patterns/<name>" -c "$(cat <<\'EOF\'\n# Pattern: <Name>\n\n## 1. Problem\nWhat problem this pattern solves.\n\n## 2. Solution\nHow to implement the pattern.\n\n## 3. Example\n```typescript\n// Code example\n```\n\n## 4. When to Use\n- Situation 1\n- Situation 2\n\n## 5. Source\nDiscovered in @task-<id> (or describe context)\nEOF\n)"\n```\n{{/if}}\n\n**If updating existing doc:**\n\n{{#if mcp}}\n```json\nmcp__knowns__update_doc({\n "path": "<path>",\n "appendContent": "\\n\\n## Additional: <Topic>\\n\\n<new insight or example>"\n})\n```\n{{else}}\n```bash\nknowns doc edit "<path>" -a "\n\n## Additional: <Topic>\n\n<new insight or example>\n"\n```\n{{/if}}\n\n### Step 5: Create Template (if code-generatable)\n\nIf the pattern involves repeatable code structure, create a codegen template:\n\n```bash\n# Create template skeleton\nknowns template create <pattern-name>\n```\n\n**Update template config** (`.knowns/templates/<pattern-name>/_template.yaml`):\n\n```yaml\nname: <pattern-name>\ndescription: Generate <what it creates>\ndoc: patterns/<pattern-name> # Link to the doc you just created\n\nprompts:\n - name: name\n message: Name?\n validate: required\n\nfiles:\n - template: "{{name}}.ts.hbs"\n destination: "src/{{kebabCase name}}.ts"\n```\n\n**Create template files** (`.hbs` files with Handlebars):\n\n```handlebars\n// {{name}}.ts.hbs\nexport class {{pascalCase name}} {\n 
// Pattern implementation\n}\n```\n\n**Link template in doc:**\n\n{{#if mcp}}\n```json\nmcp__knowns__update_doc({\n "path": "patterns/<name>",\n "appendContent": "\\n\\n## Generate\\n\\nUse @template/<pattern-name> to generate this pattern."\n})\n```\n{{else}}\n```bash\nknowns doc edit "patterns/<name>" -a "\n\n## Generate\n\nUse @template/<pattern-name> to generate this pattern.\n"\n```\n{{/if}}\n\n### Step 6: Link Back (if from task)\n\n```bash\nknowns task edit $ARGUMENTS --append-notes "\u{1F4DA} Extracted to @doc/patterns/<name>"\nknowns task edit $ARGUMENTS --append-notes "\u{1F527} Template: @template/<pattern-name>"\n```\n\n## What to Extract\n\n| Source | Extract As | Create Template? |\n|--------|------------|------------------|\n| Code pattern | Pattern doc | \u2705 Yes |\n| Component structure | Pattern doc | \u2705 Yes |\n| API endpoint pattern | Integration guide | \u2705 Yes |\n| Error solution | Troubleshooting guide | \u274C No |\n| Performance fix | Performance patterns | \u274C Usually no |\n| Security approach | Security guidelines | \u274C No |\n\n**Create template when:**\n- Pattern is repeatable (will be used multiple times)\n- Has consistent file structure\n- Can be parameterized (name, type, etc.)\n\n## Document Templates\n\n### Pattern Template\n```markdown\n# Pattern: <Name>\n\n## Problem\nWhat this solves.\n\n## Solution\nHow to implement.\n\n## Example\nWorking code.\n\n## When to Use\nWhen to apply this pattern.\n```\n\n### Guide Template\n```markdown\n# Guide: <Topic>\n\n## Overview\nWhat this covers.\n\n## Steps\n1. Step one\n2. 
Step two\n\n## Common Issues\n- Issue and solution\n```\n\n## Quality Checklist\n\n- [ ] Knowledge is generalizable (not task-specific)\n- [ ] Includes working example\n- [ ] Explains when to use\n- [ ] Links back to source (if applicable)\n- [ ] Tagged appropriately\n- [ ] Template created (if code-generatable)\n- [ ] Doc links to template (`@template/...`)\n- [ ] Template links to doc (`doc:` in config)\n\n## Remember\n\n- Only extract generalizable knowledge\n- Search before creating (avoid duplicates)\n- Include practical examples\n- Reference source when available\n- Tag docs for discoverability\n- **Create template for repeatable code patterns**\n- **Link doc \u2194 template bidirectionally**\n';
|
|
60125
|
+
|
|
60126
|
+
// src/instructions/skills/knowns.init/SKILL.md
|
|
60127
|
+
var SKILL_default4 = '---\nname: knowns.init\ndescription: Use at the start of a new session to read project docs, understand context, and see current state\n---\n\n# Session Initialization\n\nInitialize a session by reading project documentation and understanding current state.\n\n**Announce at start:** "I\'m using the knowns.init skill to initialize this session."\n\n**Core principle:** READ DOCS BEFORE DOING ANYTHING ELSE.\n\n## The Process\n\n### Step 1: List Available Documentation\n\n{{#if mcp}}\n```json\nmcp__knowns__list_docs({})\n```\n{{else}}\n```bash\nknowns doc list --plain\n```\n{{/if}}\n\n### Step 2: Read Core Documents\n\n**Priority order:**\n\n{{#if mcp}}\n```json\n// 1. Project overview (always read)\nmcp__knowns__get_doc({ "path": "README", "smart": true })\n\n// 2. Architecture (if exists)\nmcp__knowns__get_doc({ "path": "ARCHITECTURE", "smart": true })\n\n// 3. Conventions (if exists)\nmcp__knowns__get_doc({ "path": "CONVENTIONS", "smart": true })\n```\n{{else}}\n```bash\n# 1. Project overview (always read)\nknowns doc "README" --plain\n\n# 2. Architecture (if exists)\nknowns doc "ARCHITECTURE" --plain\n\n# 3. 
Conventions (if exists)\nknowns doc "CONVENTIONS" --plain\n```\n{{/if}}\n\n### Step 3: Check Current State\n\n{{#if mcp}}\n```json\n// Active timer?\nmcp__knowns__get_time_report({})\n\n// Tasks in progress\nmcp__knowns__list_tasks({ "status": "in-progress" })\n\n// Board overview\nmcp__knowns__get_board({})\n```\n{{else}}\n```bash\n# Active timer?\nknowns time status\n\n# Tasks in progress\nknowns task list --status in-progress --plain\n\n# High priority todos\nknowns task list --status todo --plain | head -20\n```\n{{/if}}\n\n### Step 4: Summarize Context\n\nProvide a brief summary:\n\n```markdown\n## Session Context\n\n### Project\n- **Name**: [from config]\n- **Purpose**: [from README]\n\n### Key Docs Available\n- README: [brief note]\n- ARCHITECTURE: [if exists]\n- CONVENTIONS: [if exists]\n\n### Current State\n- Tasks in progress: [count]\n- Active timer: [yes/no]\n\n### Ready for\n- Working on tasks\n- Creating documentation\n- Answering questions about codebase\n```\n\n## Quick Commands After Init\n\n```\n# Work on a task\n/knowns.task <id>\n\n# Search for something\n{{#if mcp}}\nmcp__knowns__search_docs({ "query": "<query>" })\n{{else}}\nknowns search "<query>" --plain\n{{/if}}\n```\n\n## When to Re-Initialize\n\n**Run init again when:**\n- Starting a new session\n- Major project changes occurred\n- Switching to different area of project\n- Context feels stale\n\n## What to Learn from Docs\n\nFrom **README**:\n- Project purpose and scope\n- Key features\n- Getting started info\n\nFrom **ARCHITECTURE**:\n- System design\n- Component structure\n- Key decisions\n\nFrom **CONVENTIONS**:\n- Coding standards\n- Naming conventions\n- File organization\n\n## Remember\n\n- Always read docs first\n- Check for active work (in-progress tasks)\n- Summarize context for reference\n- Re-init when switching areas\n';
|
|
60128
|
+
|
|
60129
|
+
// src/instructions/skills/knowns.research/SKILL.md
|
|
60130
|
+
var SKILL_default5 = '---\nname: knowns.research\ndescription: Use when you need to understand existing code, find patterns, or explore the codebase before implementation\n---\n\n# Researching the Codebase\n\nUnderstand existing patterns and implementation before making changes.\n\n**Announce at start:** "I\'m using the knowns.research skill to research [topic]."\n\n**Core principle:** UNDERSTAND WHAT EXISTS BEFORE ADDING NEW CODE.\n\n## The Process\n\n### Step 1: Search Documentation\n\n{{#if mcp}}\n```json\n// Search docs for topic\nmcp__knowns__search_docs({ "query": "<topic>" })\n\n// Read relevant docs\nmcp__knowns__get_doc({ "path": "<path>", "smart": true })\n```\n{{else}}\n```bash\n# Search docs for topic\nknowns search "<topic>" --type doc --plain\n\n# Read relevant docs\nknowns doc "<path>" --plain\n```\n{{/if}}\n\n### Step 2: Search Completed Tasks\n\n{{#if mcp}}\n```json\n// Find similar work that was done\nmcp__knowns__search_tasks({ "query": "<keywords>" })\n\n// View task for implementation details\nmcp__knowns__get_task({ "taskId": "<id>" })\n```\n{{else}}\n```bash\n# Find similar work that was done\nknowns search "<keywords>" --type task --status done --plain\n\n# View task for implementation details\nknowns task <id> --plain\n```\n{{/if}}\n\n**Learn from history** - completed tasks often contain valuable insights.\n\n### Step 3: Search Codebase\n\n```bash\n# Find files by name pattern\nfind . 
-name "*<pattern>*" -type f | grep -v node_modules | head -20\n\n# Search code content\ngrep -r "<pattern>" --include="*.ts" --include="*.tsx" -l | head -20\n```\n\n### Step 4: Analyze Patterns\n\nLook for:\n- How similar features are implemented\n- Common patterns used\n- File/folder structure conventions\n- Naming conventions\n- Error handling patterns\n\n### Step 5: Document Findings\n\n```markdown\n## Research: [Topic]\n\n### Existing Implementations\n- `src/path/file.ts`: Does X\n- `src/path/other.ts`: Handles Y\n\n### Patterns Found\n- Pattern 1: Used for...\n- Pattern 2: Applied when...\n\n### Related Docs\n- @doc/path1 - Covers X\n- @doc/path2 - Explains Y\n\n### Recommendations\nBased on research:\n1. Reuse X from Y\n2. Follow pattern Z\n3. Avoid approach W because...\n```\n\n## Research Checklist\n\n- [ ] Searched documentation\n- [ ] Reviewed similar completed tasks\n- [ ] Found existing code patterns\n- [ ] Identified reusable components\n- [ ] Noted conventions to follow\n\n## After Research\n\nUse findings in task:\n{{#if mcp}}\n```json\n// Create informed task\nmcp__knowns__create_task({\n "title": "<title>",\n "description": "Based on research: use pattern from X"\n})\n```\n{{else}}\n```bash\n# Create informed task\nknowns task create "<title>" \\\n -d "Based on research: use pattern from X" \\\n --ac "Follow pattern in src/..." \\\n --ac "Reuse component Y"\n\n# Or update existing task plan\nknowns task edit <id> --plan $\'1. Based on research...\n2. 
Reuse pattern from...\'\n```\n{{/if}}\n\n## What to Look For\n\n| Looking For | Where to Check |\n|-------------|----------------|\n| Conventions | @doc/CONVENTIONS, existing code |\n| Patterns | @doc/patterns/*, similar features |\n| Utilities | src/utils/*, src/lib/* |\n| Examples | Completed tasks, tests |\n| API design | Existing endpoints, @doc/api/* |\n\n## When to Research\n\n**Always research before:**\n- Implementing new features\n- Adding new patterns\n- Making architectural decisions\n\n**Skip research for:**\n- Simple bug fixes with clear cause\n- Trivial changes following obvious patterns\n\n## Remember\n\n- Check docs and tasks first\n- Look at how similar things are done\n- Note file locations for reference\n- Look at tests for expected behavior\n- Document findings for future reference\n';
|
|
60131
|
+
|
|
60132
|
+
// src/instructions/skills/knowns.task.brainstorm/SKILL.md
|
|
60133
|
+
var SKILL_default6 = '---\nname: knowns.task.brainstorm\ndescription: Use when requirements are unclear, multiple approaches exist, or you need to explore solutions before planning\n---\n\n# Brainstorming for Tasks\n\nConvert vague requirements into concrete design through structured questioning and exploration.\n\n**Announce at start:** "I\'m using the knowns.task.brainstorm skill to explore approaches."\n\n**Core principle:** UNDERSTAND THE PROBLEM BEFORE PROPOSING SOLUTIONS.\n\n## The Process\n\n### Phase 1: Discovery\n\n**One question at a time.** Don\'t overwhelm with multiple questions.\n\nPrefer multiple-choice when possible:\n```\nWhich approach do you prefer?\nA) Quick solution with trade-offs\nB) Comprehensive solution, more effort\nC) Something else (describe)\n```\n\nQuestions to clarify:\n- What problem are we solving?\n- Who are the users/stakeholders?\n- What are the constraints?\n- What does success look like?\n\n### Phase 2: Research Existing Patterns\n\n{{#if mcp}}\n```json\n// Search docs for related patterns\nmcp__knowns__search_docs({ "query": "<topic>" })\n\n// Check how similar things were done\nmcp__knowns__search_tasks({ "query": "<keywords>" })\n```\n{{else}}\n```bash\n# Search docs for related patterns\nknowns search "<topic>" --type doc --plain\n\n# Check how similar things were done\nknowns search "<keywords>" --type task --status done --plain\n```\n{{/if}}\n\n**Learn from history** - completed tasks often contain implementation insights.\n\n### Phase 3: Explore Approaches\n\nPresent 2-3 options with trade-offs:\n\n```markdown\n## Option A: [Name]\n- **Approach**: Brief description\n- **Pros**: What\'s good\n- **Cons**: What\'s challenging\n- **Effort**: Low/Medium/High\n\n## Option B: [Name]\n- **Approach**: Brief description\n- **Pros**: What\'s good\n- **Cons**: What\'s challenging\n- **Effort**: Low/Medium/High\n```\n\n**Lead with your recommendation** and explain why.\n\n### Phase 4: Validate and Document\n\nAfter agreement:\n- 
Summarize the chosen approach\n- Identify potential risks\n- Define acceptance criteria\n\nIf creating a new task:\n{{#if mcp}}\n```json\nmcp__knowns__create_task({\n "title": "<title>",\n "description": "Based on brainstorm: <key decisions>",\n "acceptanceCriteria": ["Criterion 1", "Criterion 2"]\n})\n```\n{{else}}\n```bash\nknowns task create "<title>" \\\n -d "Based on brainstorm: <key decisions>" \\\n --ac "Criterion 1" \\\n --ac "Criterion 2"\n```\n{{/if}}\n\nIf updating existing task:\n{{#if mcp}}\n```json\nmcp__knowns__update_task({\n "taskId": "$ARGUMENTS",\n "description": "Updated based on brainstorm..."\n})\n```\n{{else}}\n```bash\nknowns task edit $ARGUMENTS -d "Updated based on brainstorm..."\n```\n{{/if}}\n\n## When to Use This Skill\n\n**Good candidates:**\n- Vague requirements ("make it faster", "improve UX")\n- Multiple valid approaches exist\n- Significant effort involved\n- New territory for the project\n\n**Skip for:**\n- Clear, well-defined tasks\n- Bug fixes with obvious solutions\n- Simple additions following existing patterns\n\n## Red Flags\n\n**You\'re doing it wrong if:**\n- Proposing solutions before understanding the problem\n- Asking too many questions at once\n- Not researching existing patterns first\n- Skipping trade-off analysis\n\n## Remember\n\n- One question at a time\n- Research existing patterns first\n- Present options with trade-offs\n- Lead with your recommendation\n- Document the decision\n';
|
|
60134
|
+
|
|
60135
|
+
// src/instructions/skills/knowns.task.implement/SKILL.md
|
|
60136
|
+
var SKILL_default7 = '---\nname: knowns.task.implement\ndescription: Use when implementing a task - follow the plan, check ACs, track progress\n---\n\n# Implementing a Task\n\nExecute the implementation plan, track progress, and complete the task.\n\n**Announce at start:** "I\'m using the knowns.task.implement skill to implement task [ID]."\n\n**Core principle:** CHECK AC ONLY AFTER WORK IS DONE.\n\n## The Process\n\n### Step 1: Review Current State\n\n{{#if mcp}}\n```json\nmcp__knowns__get_task({ "taskId": "$ARGUMENTS" })\n```\n{{else}}\n```bash\nknowns task $ARGUMENTS --plain\n```\n{{/if}}\n\nVerify:\n- Plan exists and is approved\n- Timer is running\n- Know which ACs are pending\n\n### Step 2: Check for Applicable Templates\n\nBefore writing code, check if there\'s a template that matches:\n\n```bash\nknowns template list\n```\n\n**If template exists:**\n1. Read linked doc for context\n2. Use template to generate boilerplate\n3. Customize generated code as needed\n\n{{#if mcp}}\n```json\n// Read template\'s linked doc\nmcp__knowns__get_doc({ "path": "<template-doc>", "smart": true })\n```\n```bash\n# Generate code from template (reduces context, ensures consistency)\nknowns template run <template-name> --name "MyComponent"\n```\n{{else}}\n```bash\n# Read template\'s linked doc\nknowns doc "<template-doc>" --plain\n\n# Generate code from template (reduces context, ensures consistency)\nknowns template run <template-name> --name "MyComponent"\n```\n{{/if}}\n\n**Why use templates:**\n- Reduces context (no need to generate boilerplate)\n- Ensures consistency with project patterns\n- Faster implementation\n\n### Step 3: Work Through Plan\n\nFor each step in the plan:\n\n1. **Check for template** (use if available)\n2. **Do the work** (generate or write code)\n3. **Check related AC** (only after work is done!)\n4. 
**Append progress note**\n\n{{#if mcp}}\n```json\n// After completing work for AC #1:\nmcp__knowns__update_task({\n "taskId": "$ARGUMENTS",\n "checkAc": [1],\n "appendNotes": "\u2713 Done: brief description"\n})\n```\n{{else}}\n```bash\n# After completing work for AC #1:\nknowns task edit $ARGUMENTS --check-ac 1\nknowns task edit $ARGUMENTS --append-notes "\u2713 Done: brief description"\n```\n{{/if}}\n\n### Step 4: Handle Scope Changes\n\nIf new requirements emerge during implementation:\n\n**Small change:**\n{{#if mcp}}\n```json\nmcp__knowns__update_task({\n "taskId": "$ARGUMENTS",\n "addAc": ["New requirement"],\n "appendNotes": "\u26A0\uFE0F Scope: added requirement per user"\n})\n```\n{{else}}\n```bash\nknowns task edit $ARGUMENTS --ac "New requirement"\nknowns task edit $ARGUMENTS --append-notes "\u26A0\uFE0F Scope: added requirement per user"\n```\n{{/if}}\n\n**Large change:**\n- Stop and ask user\n- Consider creating follow-up task\n- Update plan if needed\n\n### Step 5: Verify & Complete\n\nWhen all ACs are checked:\n\n**1. Verify code quality:**\n```bash\nnpm test # or project\'s test command\nnpm run lint # or project\'s lint command\nnpm run build # if applicable\n```\n\n**Don\'t complete if verification fails.** Fix issues first.\n\n**2. Add implementation notes (REQUIRED for audit):**\n\nDocument all changes made for audit trail.\n\n> \u26A0\uFE0F **CRITICAL**: Use `appendNotes` (NOT `notes`). 
Using `notes` will DESTROY the audit trail!\n\n{{#if mcp}}\n```json\nmcp__knowns__update_task({\n "taskId": "$ARGUMENTS",\n "appendNotes": "## Implementation Complete\\n\\n### Files Changed\\n- `src/path/file.ts` - Added X\\n- `src/path/other.ts` - Modified Y\\n\\n### Key Changes\\n- Change 1: description\\n\\n### Testing\\n- Test coverage / manual testing done"\n})\n```\n{{else}}\n```bash\nknowns task edit $ARGUMENTS --append-notes $\'\n## Implementation Complete\n\n### Files Changed\n- `src/path/file.ts` - Added X\n- `src/path/other.ts` - Modified Y\n- `tests/file.test.ts` - Added tests\n\n### Key Changes\n- Change 1: description\n- Change 2: description\n\n### Testing\n- Test coverage / manual testing done\n\'\n```\n{{/if}}\n\n**IMPORTANT:** Always use `appendNotes` (not `notes`) to preserve audit trail.\n\n**3. Stop timer and mark done:**\n\n{{#if mcp}}\n```json\nmcp__knowns__stop_time({ "taskId": "$ARGUMENTS" })\n```\n```json\nmcp__knowns__update_task({\n "taskId": "$ARGUMENTS",\n "status": "done"\n})\n```\n{{else}}\n```bash\nknowns time stop\nknowns task edit $ARGUMENTS -s done\n```\n{{/if}}\n\n### Step 6: Consider Knowledge Extraction\n\nIf generalizable patterns were discovered:\n\n```\n/knowns.extract $ARGUMENTS\n```\n\n## Progress Tracking\n\nUse concise notes:\n\n{{#if mcp}}\n```json\n// Good\nmcp__knowns__update_task({\n "taskId": "$ARGUMENTS",\n "appendNotes": "\u2713 Auth middleware implemented"\n})\n\n// Bad (too verbose)\nmcp__knowns__update_task({\n "taskId": "$ARGUMENTS",\n "appendNotes": "I have successfully completed..."\n})\n```\n{{else}}\n```bash\n# Good\nknowns task edit $ARGUMENTS --append-notes "\u2713 Auth middleware implemented"\n\n# Bad (too verbose)\nknowns task edit $ARGUMENTS --append-notes "I have successfully completed..."\n```\n{{/if}}\n\n## Completion Checklist\n\n- [ ] All ACs checked\n- [ ] Tests pass\n- [ ] Lint clean\n- [ ] Implementation notes added (with file changes for audit)\n- [ ] Timer stopped\n- [ ] Status set to 
`done`\n- [ ] Knowledge extracted (if applicable)\n\n## Red Flags\n\n**You\'re doing it wrong if:**\n- Checking AC before work is actually complete\n- Making changes not in the approved plan (without asking)\n- Skipping tests\n- Not tracking progress with notes\n- Marking done without verification\n\n## When to Stop\n\n**STOP and ask when:**\n- Requirements unclear or contradictory\n- Approach isn\'t working after 2-3 attempts\n- Need changes outside approved scope\n- Hit unexpected blocker\n\n## If Verification Fails\n\n**Tests failing:**\n1. Keep task in-progress\n2. Fix the issue\n3. Re-run verification\n\n**Forgot to stop timer:**\n{{#if mcp}}\n```json\nmcp__knowns__add_time({\n "taskId": "$ARGUMENTS",\n "duration": "<duration>",\n "note": "Timer correction"\n})\n```\n{{else}}\n```bash\nknowns time add $ARGUMENTS <duration> -n "Timer correction"\n```\n{{/if}}\n\n## Remember\n\n- Check AC only AFTER work is done\n- Use templates when available\n- Track progress with notes\n- Ask before scope changes\n- Follow the approved plan\n- Verify before marking done\n- Always stop the timer\n- Consider knowledge extraction\n';
|
|
60137
|
+
|
|
60138
|
+
// src/instructions/skills/knowns.task.plan/SKILL.md
|
|
60139
|
+
var SKILL_default8 = '---\nname: knowns.task.plan\ndescription: Use when creating an implementation plan for a task\n---\n\n# Planning a Task\n\nTake ownership, gather context, create implementation plan, and get user approval.\n\n**Announce at start:** "I\'m using the knowns.task.plan skill to plan task [ID]."\n\n**Core principle:** GATHER CONTEXT \u2192 PLAN \u2192 WAIT FOR APPROVAL.\n\n## The Process\n\n### Step 1: View Task & Take Ownership\n\n{{#if mcp}}\n```json\nmcp__knowns__get_task({ "taskId": "$ARGUMENTS" })\n```\n\n```json\nmcp__knowns__update_task({\n "taskId": "$ARGUMENTS",\n "status": "in-progress",\n "assignee": "@me"\n})\n```\n\n```json\nmcp__knowns__start_time({ "taskId": "$ARGUMENTS" })\n```\n{{else}}\n```bash\nknowns task $ARGUMENTS --plain\nknowns task edit $ARGUMENTS -s in-progress -a @me\nknowns time start $ARGUMENTS\n```\n{{/if}}\n\n**Timer is mandatory.** Time data is used for estimation.\n\n### Step 2: Gather Context\n\n**Follow all refs in task:**\n\n{{#if mcp}}\n```json\n// @doc/<path> \u2192\nmcp__knowns__get_doc({ "path": "<path>", "smart": true })\n\n// @task-<id> \u2192\nmcp__knowns__get_task({ "taskId": "<id>" })\n```\n{{else}}\n```bash\n# @doc/<path> \u2192\nknowns doc "<path>" --plain\n\n# @task-<id> \u2192\nknowns task <id> --plain\n```\n{{/if}}\n\n**Search for related context:**\n\n{{#if mcp}}\n```json\nmcp__knowns__search_docs({ "query": "<keywords>" })\nmcp__knowns__search_tasks({ "query": "<keywords>" })\n```\n{{else}}\n```bash\nknowns search "<keywords>" --type doc --plain\nknowns search "<keywords>" --type task --status done --plain\n```\n{{/if}}\n\n**Check for templates:**\n\n```bash\nknowns template list\n```\n\n### Step 3: Draft Implementation Plan\n\nStructure your plan:\n\n```markdown\n## Implementation Plan\n\n1. [Step] (see @doc/relevant-doc)\n2. [Step] (use @template/xxx if available)\n3. Add tests\n4. 
Update documentation\n```\n\n**Plan guidelines:**\n- Reference relevant docs with `@doc/<path>`\n- Reference templates with `@template/<name>`\n- Include testing step\n- Include doc updates if needed\n- Keep steps actionable and specific\n\n### Step 4: Present to User\n\nShow the plan and **ASK for approval**:\n\n```markdown\nHere\'s my implementation plan for task [ID]:\n\n1. Step one (see @doc/xxx)\n2. Generate boilerplate with @template/xxx\n3. Customize implementation\n4. Add unit tests\n5. Update API docs\n\nShall I proceed with this plan?\n```\n\n**WAIT for explicit approval.**\n\n### Step 5: Save Plan (after approval)\n\n> \u26A0\uFE0F **Use `appendNotes` (NOT `notes`)** to preserve audit trail:\n\n{{#if mcp}}\n```json\nmcp__knowns__update_task({\n "taskId": "$ARGUMENTS",\n "plan": "1. Step one (see @doc/xxx)\\n2. Step two\\n3. Add unit tests\\n4. Update API docs",\n "appendNotes": "\u{1F4CB} Plan approved, starting implementation"\n})\n```\n{{else}}\n```bash\nknowns task edit $ARGUMENTS --plan $\'1. Step one (see @doc/xxx)\n2. Step two\n3. Add unit tests\n4. Update API docs\'\nknowns task edit $ARGUMENTS --append-notes "\u{1F4CB} Plan approved, starting implementation"\n```\n{{/if}}\n\n## Plan Quality Checklist\n\n- [ ] Task ownership taken (status: in-progress)\n- [ ] Timer started\n- [ ] All refs followed\n- [ ] Related docs/tasks searched\n- [ ] Templates identified (if any)\n- [ ] Steps are specific and actionable\n- [ ] Includes relevant doc/template references\n- [ ] Includes testing\n- [ ] User has approved\n\n## Next Step\n\nAfter plan is approved:\n\n```\n/knowns.task.implement $ARGUMENTS\n```\n\n## When Plan Isn\'t Clear\n\nIf requirements are unclear or multiple approaches exist:\n\n```\n/knowns.task.brainstorm $ARGUMENTS\n```\n\n## Remember\n\n- Take ownership and start timer first\n- Gather context before planning\n- Check for templates to use\n- Never implement without approved plan\n- Reference docs and templates in the plan\n';
|
|
60140
|
+
|
|
60141
|
+
// src/instructions/skills/knowns.task.reopen/SKILL.md
|
|
60142
|
+
var SKILL_default9 = '---\nname: knowns.task.reopen\ndescription: Use when reopening a completed task to add new requirements, fix issues, or extend functionality\n---\n\n# Reopening Tasks\n\nReopen completed tasks properly with time tracking and requirement documentation.\n\n**Announce at start:** "I\'m using the knowns.task.reopen skill to reopen task [ID]."\n\n**Core principle:** DOCUMENT WHY THE TASK IS REOPENED.\n\n## The Process\n\n### Step 1: View Current Task State\n\n{{#if mcp}}\n```json\nmcp__knowns__get_task({ "taskId": "$ARGUMENTS" })\n```\n{{else}}\n```bash\nknowns task $ARGUMENTS --plain\n```\n{{/if}}\n\nVerify:\n- Task is currently `done`\n- Understand what was implemented\n- Review implementation notes\n\n### Step 2: Reopen and Start Timer\n\n{{#if mcp}}\n```json\n// Set back to in-progress\nmcp__knowns__update_task({\n "taskId": "$ARGUMENTS",\n "status": "in-progress"\n})\n\n// Start timer (REQUIRED)\nmcp__knowns__start_time({ "taskId": "$ARGUMENTS" })\n```\n{{else}}\n```bash\n# Set back to in-progress\nknowns task edit $ARGUMENTS -s in-progress\n\n# Start timer (REQUIRED)\nknowns time start $ARGUMENTS\n```\n{{/if}}\n\n### Step 3: Document Reopen Reason\n\n> \u26A0\uFE0F **Use `appendNotes` (NOT `notes`)** to preserve existing audit trail:\n\n{{#if mcp}}\n```json\nmcp__knowns__update_task({\n "taskId": "$ARGUMENTS",\n "appendNotes": "\u{1F504} Reopened: <reason>"\n})\n```\n{{else}}\n```bash\nknowns task edit $ARGUMENTS --append-notes "\u{1F504} Reopened: <reason>"\n```\n{{/if}}\n\n**Common reasons:**\n- User requested changes\n- Bug found in implementation\n- New requirements added\n- Missed acceptance criteria\n\n### Step 4: Add New Requirements\n\n{{#if mcp}}\n```json\nmcp__knowns__update_task({\n "taskId": "$ARGUMENTS",\n "addAc": ["New requirement 1", "Fix: issue description"]\n})\n```\n{{else}}\n```bash\n# Add new acceptance criteria\nknowns task edit $ARGUMENTS --ac "New requirement 1"\nknowns task edit $ARGUMENTS --ac "Fix: issue 
description"\n```\n{{/if}}\n\n### Step 5: Update Plan (if needed)\n\n{{#if mcp}}\n```json\nmcp__knowns__update_task({\n "taskId": "$ARGUMENTS",\n "plan": "Previous plan + new steps:\\n1. Original step (done)\\n2. Original step (done)\\n3. NEW: Address new requirement\\n4. NEW: Fix reported issue"\n})\n```\n{{else}}\n```bash\nknowns task edit $ARGUMENTS --plan $\'Previous plan + new steps:\n1. Original step (done)\n2. Original step (done)\n3. NEW: Address new requirement\n4. NEW: Fix reported issue\'\n```\n{{/if}}\n\n**Present updated plan and WAIT for approval.**\n\n### Step 6: Implement and Complete\n\nFollow normal task completion flow:\n\n{{#if mcp}}\n```json\n// Check new ACs as completed\nmcp__knowns__update_task({\n "taskId": "$ARGUMENTS",\n "checkAc": [<new-index>],\n "appendNotes": "\u2713 Done: new requirement"\n})\n\n// Stop timer\nmcp__knowns__stop_time({ "taskId": "$ARGUMENTS" })\n\n// Mark done again\nmcp__knowns__update_task({\n "taskId": "$ARGUMENTS",\n "status": "done"\n})\n```\n{{else}}\n```bash\n# Check new ACs as completed\nknowns task edit $ARGUMENTS --check-ac <new-index>\nknowns task edit $ARGUMENTS --append-notes "\u2713 Done: new requirement"\n\n# Stop timer\nknowns time stop\n\n# Mark done again\nknowns task edit $ARGUMENTS -s done\n```\n{{/if}}\n\n## When to Reopen vs Create New Task\n\n| Reopen Existing | Create New Task |\n|-----------------|-----------------|\n| Small fix/change | Major new feature |\n| Related to original work | Unrelated work |\n| Same context needed | Different context |\n| Quick addition | Significant scope |\n\n**Rule of thumb:** If it takes < 30 mins and relates to original task, reopen. 
Otherwise, create new task with reference.\n\n## Creating Follow-up Task Instead\n\n{{#if mcp}}\n```json\nmcp__knowns__create_task({\n "title": "Follow-up: <description>",\n "description": "Related to @task-$ARGUMENTS"\n})\n```\n{{else}}\n```bash\nknowns task create "Follow-up: <description>" \\\n -d "Related to @task-$ARGUMENTS" \\\n --ac "New requirement"\n```\n{{/if}}\n\n## Remember\n\n- Always document reopen reason\n- Start timer when reopening\n- Add new AC for traceability\n- Stop timer when done\n- Consider if new task is more appropriate\n';
|
|
60143
|
+
|
|
60144
|
+
// src/instructions/skills/knowns.task/SKILL.md
|
|
60145
|
+
var SKILL_default10 = '---\nname: knowns.task\ndescription: Use when working on a Knowns task - view task details and decide next action\n---\n\n# Working on a Task\n\nView task details and determine the appropriate next action.\n\n**Announce at start:** "I\'m using the knowns.task skill to view task [ID]."\n\n**Core principle:** VIEW AND ROUTE - analyze state, suggest next skill.\n\n## The Process\n\n### Step 1: View Task\n\n{{#if mcp}}\n```json\nmcp__knowns__get_task({ "taskId": "$ARGUMENTS" })\n```\n{{else}}\n```bash\nknowns task $ARGUMENTS --plain\n```\n{{/if}}\n\n### Step 2: Analyze State\n\nCheck:\n- **Status**: todo, in-progress, done?\n- **Assignee**: Assigned to someone?\n- **AC**: Any checked? All checked?\n- **Plan**: Has implementation plan?\n- **Refs**: Any `@doc/` or `@task-` references?\n\n### Step 3: Suggest Next Action\n\nBased on task state, recommend the appropriate skill:\n\n| State | Next Skill |\n|-------|------------|\n| `todo`, not started | `knowns.task.plan` |\n| `in-progress`, no plan | `knowns.task.plan` |\n| `in-progress`, has plan | `knowns.task.implement` |\n| `done`, needs changes | `knowns.task.reopen` |\n| Requirements unclear | `knowns.task.brainstorm` |\n\n### Step 4: Follow Refs (if needed)\n\nIf task has references, follow them for context:\n\n{{#if mcp}}\n```json\n// Doc ref: @doc/path \u2192\nmcp__knowns__get_doc({ "path": "<path>", "smart": true })\n\n// Task ref: @task-<id> \u2192\nmcp__knowns__get_task({ "taskId": "<id>" })\n```\n{{else}}\n```bash\n# Doc ref: @doc/path \u2192\nknowns doc "<path>" --plain\n\n# Task ref: @task-<id> \u2192\nknowns task <id> --plain\n```\n{{/if}}\n\n## Quick Actions\n\n**Start planning (includes taking ownership):**\n```\n/knowns.task.plan $ARGUMENTS\n```\n\n**Continue implementing:**\n```\n/knowns.task.implement $ARGUMENTS\n```\n\n**Requirements unclear:**\n```\n/knowns.task.brainstorm $ARGUMENTS\n```\n\n**Reopen completed task:**\n```\n/knowns.task.reopen $ARGUMENTS\n```\n\n## Remember\n\n- 
This skill is for viewing and routing\n- Use `plan` to start a new task (takes ownership, starts timer)\n- Use `implement` to continue/complete in-progress tasks\n- Always follow refs for full context\n';
|
|
60146
|
+
|
|
60147
|
+
// src/instructions/skills/knowns.template/SKILL.md
|
|
60148
|
+
var SKILL_default11 = '---\nname: knowns.template\ndescription: Use when generating code from templates - list, run, or create templates\n---\n\n# Working with Templates\n\nGenerate code from predefined templates stored in `.knowns/templates/`.\n\n**Announce at start:** "I\'m using the knowns.template skill to work with templates."\n\n**Core principle:** USE TEMPLATES FOR CONSISTENT CODE GENERATION.\n\n## The Process\n\n### Step 1: List Available Templates\n\n{{#if mcp}}\n```json\nmcp__knowns__list_templates({})\n```\n{{else}}\n```bash\nknowns template list\n```\n{{/if}}\n\n### Step 2: Get Template Details\n\n{{#if mcp}}\n```json\nmcp__knowns__get_template({ "name": "<template-name>" })\n```\n{{else}}\n```bash\nknowns template info <template-name>\n```\n{{/if}}\n\nCheck:\n- Required variables (prompts)\n- Linked documentation (`doc:`)\n- Files that will be generated\n\n### Step 3: Read Linked Documentation\n\nIf template has a `doc:` field, read it first:\n\n{{#if mcp}}\n```json\nmcp__knowns__get_doc({ "path": "<doc-path>", "smart": true })\n```\n{{else}}\n```bash\nknowns doc "<doc-path>" --plain\n```\n{{/if}}\n\n### Step 4: Run Template\n\n{{#if mcp}}\n```json\n// Dry run first (preview)\nmcp__knowns__run_template({\n "name": "<template-name>",\n "variables": { "name": "MyComponent", "type": "page" },\n "dryRun": true\n})\n\n// Then run for real\nmcp__knowns__run_template({\n "name": "<template-name>",\n "variables": { "name": "MyComponent", "type": "page" },\n "dryRun": false\n})\n```\n{{else}}\n```bash\n# Dry run (preview)\nknowns template run <template-name> --name "MyComponent" --dry-run\n\n# Run for real\nknowns template run <template-name> --name "MyComponent"\n```\n{{/if}}\n\n### Step 5: Create New Template\n\n{{#if mcp}}\n```json\nmcp__knowns__create_template({\n "name": "<template-name>",\n "description": "Template description",\n "doc": "patterns/<related-doc>" // Optional: link to documentation\n})\n```\n{{else}}\n```bash\nknowns template create 
<template-name>\n```\n{{/if}}\n\nThis creates:\n```\n.knowns/templates/<template-name>/\n \u251C\u2500\u2500 _template.yaml # Config\n \u2514\u2500\u2500 example.ts.hbs # Example file\n```\n\n## Template Config (`_template.yaml`)\n\n```yaml\nname: react-component\ndescription: Create a React component with tests\ndoc: patterns/react-component # Link to documentation\n\nprompts:\n - name: name\n message: Component name?\n validate: required\n\n - name: type\n message: Component type?\n type: select\n choices:\n - page\n - component\n - layout\n\nfiles:\n - template: "{{name}}.tsx.hbs"\n destination: "src/components/{{pascalCase name}}/{{pascalCase name}}.tsx"\n\n - template: "{{name}}.test.tsx.hbs"\n destination: "src/components/{{pascalCase name}}/{{pascalCase name}}.test.tsx"\n condition: "{{includeTests}}"\n```\n\n## Template-Doc Linking\n\nTemplates can reference docs and vice versa:\n\n**In `_template.yaml`:**\n```yaml\ndoc: patterns/react-component\n```\n\n**In doc (markdown):**\n```markdown\nUse @template/react-component to generate.\n```\n\n**AI workflow:**\n1. Get template config\n2. Follow `doc:` link to understand patterns\n3. 
Run template with appropriate variables\n\n## Handlebars Helpers\n\nTemplates use Handlebars with built-in helpers:\n\n| Helper | Example | Output |\n|--------|---------|--------|\n| `camelCase` | `{{camelCase "my name"}}` | `myName` |\n| `pascalCase` | `{{pascalCase "my name"}}` | `MyName` |\n| `kebabCase` | `{{kebabCase "MyName"}}` | `my-name` |\n| `snakeCase` | `{{snakeCase "MyName"}}` | `my_name` |\n| `upperCase` | `{{upperCase "name"}}` | `NAME` |\n| `lowerCase` | `{{lowerCase "NAME"}}` | `name` |\n\n## CRITICAL: Template Syntax Pitfalls\n\n### JavaScript Template Literals + Handlebars\n\n**NEVER write `$` followed by triple-brace** - Handlebars interprets triple-brace as unescaped output:\n\n```\n// \u274C WRONG - Parse error!\nthis.logger.log(`Created: $` + `\\{{\\{camelCase entity}.id}`);\n\n// \u2705 CORRECT - Add space, use ~ to trim whitespace\nthis.logger.log(`Created: ${ \\{{~camelCase entity~}}.id}`);\n// Output: this.logger.log(`Created: ${product.id}`);\n```\n\n**Rules when writing .hbs templates:**\n1. Never `$` + triple-brace - always add space: `${ \\{{`\n2. Use `~` (tilde) to trim whitespace: `\\{{~helper~}}`\n3. For literal braces, escape with backslash\n\n## When to Use Templates\n\n| Scenario | Action |\n|----------|--------|\n| Creating new component | Run `react-component` template |\n| Adding API endpoint | Run `api-endpoint` template |\n| Setting up new feature | Run `feature-module` template |\n| Consistent file structure | Use template instead of copy-paste |\n\n## Integrated Workflows\n\n### During Implementation (Use Template)\n\n```\nTask \u2192 Read Context \u2192 Find Template \u2192 Generate Code \u2192 Customize\n```\n\n1. Read task and understand requirements\n2. List templates to find applicable one\n3. Get template details and read linked doc\n4. Run template (dry run first, then real)\n5. Customize generated code as needed\n6. 
Continue with remaining implementation\n\n**Benefits:**\n- Reduces context (no need to generate boilerplate)\n- Ensures consistency with project patterns\n- Faster implementation\n\n### During Extract (Create Template)\n\n```\nContext \u2192 Identify Pattern \u2192 Create Doc \u2192 Create Template \u2192 Link Both\n```\n\n1. Identify repeatable code pattern\n2. Create doc with `/knowns.extract`\n3. Create template with `knowns template create <name>`\n4. Link template to doc: `doc: patterns/<name>`\n5. Link doc to template: `@template/<name>`\n\n**When to create template:**\n- Pattern will be used multiple times\n- Has consistent file structure\n- Can be parameterized\n\n## Checklist\n\n- [ ] Listed available templates\n- [ ] Got template details (prompts, files)\n- [ ] Read linked documentation (if any)\n- [ ] Understood required variables\n- [ ] Ran dry run first\n- [ ] Ran template with correct inputs\n- [ ] Verified generated files\n\n## Remember\n\n- Always dry run first before writing files\n- Check `doc:` link in template for context\n- Templates ensure consistent code structure\n- Create new templates for repeated patterns\n- **NEVER write `$` + triple-brace** - use `${ \\{{~helper~}}` instead (add space, use tilde)\n';
|
|
60149
|
+
|
|
60150
|
+
// src/instructions/skills/index.ts
|
|
60151
|
+
// Extracts the `name` and `description` fields from a skill file's YAML
// frontmatter. Only a leading "---" fence is recognized; scanning stops at
// the first closing "---". Fields that are absent come back as "".
function parseSkillFrontmatter(content) {
  const allLines = content.trim().split("\n");
  let name = "";
  let description = "";
  // No opening fence on the very first line means no frontmatter at all.
  if (allLines[0] === "---") {
    for (const line of allLines.slice(1)) {
      if (line === "---") break;
      const nameMatch = line.match(/^name:\s*(.+)$/);
      if (nameMatch) name = nameMatch[1].trim();
      const descMatch = line.match(/^description:\s*(.+)$/);
      if (descMatch) description = descMatch[1].trim();
    }
  }
  return { name, description };
}
|
|
60166
|
+
// Builds a skill descriptor from raw SKILL.md content. The frontmatter
// `name` wins when present; the folder name is the fallback identifier.
// The stored content is the trimmed original markdown.
function createSkill(content, folderName) {
  const meta = parseSkillFrontmatter(content);
  const skill = {
    name: meta.name || folderName,
    folderName,
    description: meta.description,
    content: content.trim()
  };
  return skill;
}
|
|
60175
|
+
// Pre-parsed skill registry: each embedded SKILL.md blob (SKILL_defaultN)
// is turned into a { name, folderName, description, content } record at
// module load time via createSkill.
var SKILL_TASK = createSkill(SKILL_default10, "knowns.task");
var SKILL_TASK_PLAN = createSkill(SKILL_default8, "knowns.task.plan");
var SKILL_TASK_IMPLEMENT = createSkill(SKILL_default7, "knowns.task.implement");
var SKILL_TASK_BRAINSTORM = createSkill(SKILL_default6, "knowns.task.brainstorm");
var SKILL_TASK_REOPEN = createSkill(SKILL_default9, "knowns.task.reopen");
var SKILL_EXTRACT = createSkill(SKILL_default3, "knowns.extract");
var SKILL_DOC = createSkill(SKILL_default2, "knowns.doc");
var SKILL_COMMIT = createSkill(SKILL_default, "knowns.commit");
var SKILL_INIT = createSkill(SKILL_default4, "knowns.init");
var SKILL_RESEARCH = createSkill(SKILL_default5, "knowns.research");
var SKILL_TEMPLATE = createSkill(SKILL_default11, "knowns.template");
|
|
60186
|
+
|
|
60059
60187
|
// src/mcp/handlers/template.ts
|
|
60060
|
-
var TEMPLATES_DIR3 =
|
|
60188
|
+
// Templates live under <cwd>/.knowns/templates; the MCP server treats the
// process working directory as the project root (assumes the server is
// launched from the project root — TODO confirm against server startup docs).
var TEMPLATES_DIR3 = join17(process.cwd(), ".knowns", "templates");
var PROJECT_ROOT = process.cwd();
|
|
60061
60190
|
var listTemplatesSchema = external_exports3.object({});
|
|
60062
60191
|
var getTemplateSchema = external_exports3.object({
|
|
60063
60192
|
name: external_exports3.string()
|
|
@@ -60096,7 +60225,7 @@ var templateTools = [
|
|
|
60096
60225
|
properties: {
|
|
60097
60226
|
name: {
|
|
60098
60227
|
type: "string",
|
|
60099
|
-
description: "Template name
|
|
60228
|
+
description: "Template name. Supports import prefix (e.g., 'knowns/component' for imported template)"
|
|
60100
60229
|
}
|
|
60101
60230
|
},
|
|
60102
60231
|
required: ["name"]
|
|
@@ -60110,7 +60239,7 @@ var templateTools = [
|
|
|
60110
60239
|
properties: {
|
|
60111
60240
|
name: {
|
|
60112
60241
|
type: "string",
|
|
60113
|
-
description: "Template name to run"
|
|
60242
|
+
description: "Template name to run. Supports import prefix (e.g., 'knowns/component' for imported template)"
|
|
60114
60243
|
},
|
|
60115
60244
|
variables: {
|
|
60116
60245
|
type: "object",
|
|
@@ -60163,7 +60292,7 @@ async function handleListTemplates(_args) {
|
|
|
60163
60292
|
const templateList = [];
|
|
60164
60293
|
for (const t of allTemplates) {
|
|
60165
60294
|
try {
|
|
60166
|
-
const loaded = await listTemplates(
|
|
60295
|
+
const loaded = await listTemplates(join17(t.path, ".."));
|
|
60167
60296
|
const match2 = loaded.find((l) => l.name === t.name);
|
|
60168
60297
|
templateList.push({
|
|
60169
60298
|
name: t.name,
|
|
@@ -60199,13 +60328,14 @@ async function handleListTemplates(_args) {
|
|
|
60199
60328
|
}
|
|
60200
60329
|
async function handleGetTemplate(args) {
|
|
60201
60330
|
const input = getTemplateSchema.parse(args);
|
|
60202
|
-
if (!existsSync12(TEMPLATES_DIR3)) {
|
|
60203
|
-
return errorResponse("No templates directory found");
|
|
60204
|
-
}
|
|
60205
60331
|
try {
|
|
60206
|
-
const
|
|
60332
|
+
const resolved = await resolveTemplate(PROJECT_ROOT, input.name);
|
|
60333
|
+
if (!resolved) {
|
|
60334
|
+
return errorResponse(`Template not found: ${input.name}. Use list_templates to see available templates.`);
|
|
60335
|
+
}
|
|
60336
|
+
const template = await loadTemplate(resolved.path);
|
|
60207
60337
|
if (!template) {
|
|
60208
|
-
return errorResponse(`
|
|
60338
|
+
return errorResponse(`Failed to load template: ${input.name}`);
|
|
60209
60339
|
}
|
|
60210
60340
|
const prompts2 = template.config.prompts?.map((p) => ({
|
|
60211
60341
|
name: p.name,
|
|
@@ -60240,6 +60370,8 @@ async function handleGetTemplate(args) {
|
|
|
60240
60370
|
actions: actions || [],
|
|
60241
60371
|
messages: template.config.messages
|
|
60242
60372
|
},
|
|
60373
|
+
source: resolved.isImported ? resolved.source : "local",
|
|
60374
|
+
isImported: resolved.isImported,
|
|
60243
60375
|
hint: template.config.doc ? `This template links to @doc/${template.config.doc}. Read the doc for context before running.` : void 0
|
|
60244
60376
|
});
|
|
60245
60377
|
} catch (error48) {
|
|
@@ -60249,13 +60381,14 @@ async function handleGetTemplate(args) {
|
|
|
60249
60381
|
async function handleRunTemplate(args) {
|
|
60250
60382
|
const input = runTemplateSchema.parse(args);
|
|
60251
60383
|
const dryRun = input.dryRun !== false;
|
|
60252
|
-
if (!existsSync12(TEMPLATES_DIR3)) {
|
|
60253
|
-
return errorResponse("No templates directory found");
|
|
60254
|
-
}
|
|
60255
60384
|
try {
|
|
60256
|
-
const
|
|
60385
|
+
const resolved = await resolveTemplate(PROJECT_ROOT, input.name);
|
|
60386
|
+
if (!resolved) {
|
|
60387
|
+
return errorResponse(`Template not found: ${input.name}. Use list_templates to see available templates.`);
|
|
60388
|
+
}
|
|
60389
|
+
const template = await loadTemplate(resolved.path);
|
|
60257
60390
|
if (!template) {
|
|
60258
|
-
return errorResponse(`
|
|
60391
|
+
return errorResponse(`Failed to load template: ${input.name}`);
|
|
60259
60392
|
}
|
|
60260
60393
|
const requiredPrompts = template.config.prompts?.filter((p) => p.validate === "required") || [];
|
|
60261
60394
|
const missingVars = requiredPrompts.filter((p) => !input.variables?.[p.name]).map((p) => p.name);
|
|
@@ -60273,7 +60406,7 @@ async function handleRunTemplate(args) {
|
|
|
60273
60406
|
}
|
|
60274
60407
|
}
|
|
60275
60408
|
const result = await runTemplate(template, {
|
|
60276
|
-
projectRoot:
|
|
60409
|
+
projectRoot: PROJECT_ROOT,
|
|
60277
60410
|
values,
|
|
60278
60411
|
dryRun
|
|
60279
60412
|
});
|
|
@@ -60287,6 +60420,8 @@ async function handleRunTemplate(args) {
|
|
|
60287
60420
|
success: true,
|
|
60288
60421
|
dryRun,
|
|
60289
60422
|
template: input.name,
|
|
60423
|
+
source: resolved.isImported ? resolved.source : "local",
|
|
60424
|
+
isImported: resolved.isImported,
|
|
60290
60425
|
variables: values,
|
|
60291
60426
|
created: result.created || [],
|
|
60292
60427
|
modified: result.modified || [],
|
|
@@ -60300,11 +60435,11 @@ async function handleRunTemplate(args) {
|
|
|
60300
60435
|
async function handleCreateTemplate(args) {
|
|
60301
60436
|
const input = createTemplateSchema.parse(args);
|
|
60302
60437
|
try {
|
|
60303
|
-
if (!
|
|
60438
|
+
if (!existsSync13(TEMPLATES_DIR3)) {
|
|
60304
60439
|
await mkdir9(TEMPLATES_DIR3, { recursive: true });
|
|
60305
60440
|
}
|
|
60306
|
-
const templateDir =
|
|
60307
|
-
if (
|
|
60441
|
+
const templateDir = join17(TEMPLATES_DIR3, input.name);
|
|
60442
|
+
if (existsSync13(templateDir)) {
|
|
60308
60443
|
return errorResponse(`Template "${input.name}" already exists`);
|
|
60309
60444
|
}
|
|
60310
60445
|
await mkdir9(templateDir, { recursive: true });
|
|
@@ -60333,7 +60468,7 @@ messages:
|
|
|
60333
60468
|
success: |
|
|
60334
60469
|
\u2713 Created {{name}}!
|
|
60335
60470
|
`;
|
|
60336
|
-
await
|
|
60471
|
+
await writeFile6(join17(templateDir, "_template.yaml"), configContent, "utf-8");
|
|
60337
60472
|
const exampleTemplate = `/**
|
|
60338
60473
|
* {{pascalCase name}}
|
|
60339
60474
|
* Generated from ${input.name} template
|
|
@@ -60343,7 +60478,7 @@ export function {{camelCase name}}() {
|
|
|
60343
60478
|
console.log("Hello from {{name}}!");
|
|
60344
60479
|
}
|
|
60345
60480
|
`;
|
|
60346
|
-
await
|
|
60481
|
+
await writeFile6(join17(templateDir, "example.ts.hbs"), exampleTemplate, "utf-8");
|
|
60347
60482
|
return successResponse({
|
|
60348
60483
|
message: `Created template: ${input.name}`,
|
|
60349
60484
|
template: {
|
|
@@ -60366,9 +60501,9 @@ export function {{camelCase name}}() {
|
|
|
60366
60501
|
|
|
60367
60502
|
// src/mcp/handlers/search.ts
|
|
60368
60503
|
var import_gray_matter5 = __toESM(require_gray_matter(), 1);
|
|
60369
|
-
import { existsSync as
|
|
60370
|
-
import { readFile as
|
|
60371
|
-
import { join as
|
|
60504
|
+
import { existsSync as existsSync14 } from "node:fs";
|
|
60505
|
+
import { readFile as readFile9, readdir as readdir8 } from "node:fs/promises";
|
|
60506
|
+
import { join as join18 } from "node:path";
|
|
60372
60507
|
var searchSchema = external_exports3.object({
|
|
60373
60508
|
query: external_exports3.string(),
|
|
60374
60509
|
type: external_exports3.enum(["all", "task", "doc"]).optional(),
|
|
@@ -60455,12 +60590,12 @@ function calculateDocScore(title, description, content, tags, query) {
|
|
|
60455
60590
|
}
|
|
60456
60591
|
async function getAllMdFiles2(dir, basePath = "") {
|
|
60457
60592
|
const files = [];
|
|
60458
|
-
if (!
|
|
60593
|
+
if (!existsSync14(dir)) {
|
|
60459
60594
|
return files;
|
|
60460
60595
|
}
|
|
60461
60596
|
const entries = await readdir8(dir, { withFileTypes: true });
|
|
60462
60597
|
for (const entry of entries) {
|
|
60463
|
-
const fullPath =
|
|
60598
|
+
const fullPath = join18(dir, entry.name);
|
|
60464
60599
|
const relativePath = basePath ? `${basePath}/${entry.name}` : entry.name;
|
|
60465
60600
|
if (entry.isDirectory()) {
|
|
60466
60601
|
const subFiles = await getAllMdFiles2(fullPath, relativePath);
|
|
@@ -60503,14 +60638,14 @@ async function searchTasks(fileStore2, query, filters) {
|
|
|
60503
60638
|
})).sort((a, b) => b.score - a.score);
|
|
60504
60639
|
}
|
|
60505
60640
|
async function searchDocs(docsDir, query, tagFilter) {
|
|
60506
|
-
if (!
|
|
60641
|
+
if (!existsSync14(docsDir)) {
|
|
60507
60642
|
return [];
|
|
60508
60643
|
}
|
|
60509
60644
|
const mdFiles = await getAllMdFiles2(docsDir);
|
|
60510
60645
|
const q = query.toLowerCase();
|
|
60511
60646
|
const results = [];
|
|
60512
60647
|
for (const file3 of mdFiles) {
|
|
60513
|
-
const fileContent = await
|
|
60648
|
+
const fileContent = await readFile9(join18(docsDir, file3), "utf-8");
|
|
60514
60649
|
const { data, content } = (0, import_gray_matter5.default)(fileContent);
|
|
60515
60650
|
const metadata = data;
|
|
60516
60651
|
if (tagFilter && !metadata.tags?.includes(tagFilter)) {
|
|
@@ -60547,7 +60682,7 @@ async function handleSearch(args, fileStore2) {
|
|
|
60547
60682
|
const input = searchSchema.parse(args);
|
|
60548
60683
|
const searchType = input.type || "all";
|
|
60549
60684
|
const limit = input.limit || 20;
|
|
60550
|
-
const docsDir =
|
|
60685
|
+
const docsDir = join18(process.cwd(), ".knowns", "docs");
|
|
60551
60686
|
let taskResults = [];
|
|
60552
60687
|
let docResults = [];
|
|
60553
60688
|
if (searchType === "all" || searchType === "task") {
|
|
@@ -60666,7 +60801,7 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
|
|
|
60666
60801
|
});
|
|
60667
60802
|
server.setRequestHandler(ListResourcesRequestSchema, async () => {
|
|
60668
60803
|
const tasks = await fileStore.getAllTasks();
|
|
60669
|
-
const docsDir =
|
|
60804
|
+
const docsDir = join19(process.cwd(), ".knowns", "docs");
|
|
60670
60805
|
const taskResources = tasks.map((task) => ({
|
|
60671
60806
|
uri: `knowns://task/${task.id}`,
|
|
60672
60807
|
name: task.title,
|
|
@@ -60674,14 +60809,14 @@ server.setRequestHandler(ListResourcesRequestSchema, async () => {
|
|
|
60674
60809
|
description: `Task #${task.id}: ${task.title}`
|
|
60675
60810
|
}));
|
|
60676
60811
|
const docResources = [];
|
|
60677
|
-
if (
|
|
60812
|
+
if (existsSync15(docsDir)) {
|
|
60678
60813
|
const { readdir: readdir9 } = await import("node:fs/promises");
|
|
60679
60814
|
async function getAllMdFiles3(dir, basePath = "") {
|
|
60680
60815
|
const files = [];
|
|
60681
60816
|
const entries = await readdir9(dir, { withFileTypes: true });
|
|
60682
60817
|
for (const entry of entries) {
|
|
60683
|
-
const fullPath =
|
|
60684
|
-
const relativePath = normalizePath(basePath ?
|
|
60818
|
+
const fullPath = join19(dir, entry.name);
|
|
60819
|
+
const relativePath = normalizePath(basePath ? join19(basePath, entry.name) : entry.name);
|
|
60685
60820
|
if (entry.isDirectory()) {
|
|
60686
60821
|
const subFiles = await getAllMdFiles3(fullPath, relativePath);
|
|
60687
60822
|
files.push(...subFiles);
|
|
@@ -60693,8 +60828,8 @@ server.setRequestHandler(ListResourcesRequestSchema, async () => {
|
|
|
60693
60828
|
}
|
|
60694
60829
|
const mdFiles = await getAllMdFiles3(docsDir);
|
|
60695
60830
|
for (const file3 of mdFiles) {
|
|
60696
|
-
const filepath =
|
|
60697
|
-
const content = await
|
|
60831
|
+
const filepath = join19(docsDir, file3);
|
|
60832
|
+
const content = await readFile10(filepath, "utf-8");
|
|
60698
60833
|
const { data } = (0, import_gray_matter6.default)(content);
|
|
60699
60834
|
docResources.push({
|
|
60700
60835
|
uri: `knowns://doc/${file3.replace(/\.md$/, "")}`,
|
|
@@ -60730,12 +60865,12 @@ server.setRequestHandler(ReadResourceRequestSchema, async (request) => {
|
|
|
60730
60865
|
const docMatch = uri.match(/^knowns:\/\/doc\/(.+)$/);
|
|
60731
60866
|
if (docMatch) {
|
|
60732
60867
|
const docPath = docMatch[1];
|
|
60733
|
-
const docsDir =
|
|
60734
|
-
const filepath =
|
|
60735
|
-
if (!
|
|
60868
|
+
const docsDir = join19(process.cwd(), ".knowns", "docs");
|
|
60869
|
+
const filepath = join19(docsDir, `${docPath}.md`);
|
|
60870
|
+
if (!existsSync15(filepath)) {
|
|
60736
60871
|
throw new Error(`Documentation ${docPath} not found`);
|
|
60737
60872
|
}
|
|
60738
|
-
const content = await
|
|
60873
|
+
const content = await readFile10(filepath, "utf-8");
|
|
60739
60874
|
const { data, content: docContent } = (0, import_gray_matter6.default)(content);
|
|
60740
60875
|
return {
|
|
60741
60876
|
contents: [
|