@vheins/local-memory-mcp 0.10.1 → 0.10.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,14 +1,92 @@
  // src/mcp/capabilities.ts
- import { fileURLToPath } from "url";
+ import { fileURLToPath as fileURLToPath2 } from "url";
+ import path2 from "path";
+
+ // src/mcp/prompts/loader.ts
+ import fs from "fs";
  import path from "path";
- var __dirname = path.dirname(fileURLToPath(import.meta.url));
+ import { fileURLToPath } from "url";
+ import matter from "gray-matter";
+ var __filename = fileURLToPath(import.meta.url);
+ var __dirname = path.dirname(__filename);
+ function findPromptDir() {
+ const candidates = [
+ // Production if chunked into dist/
+ "./prompts",
+ // Production if inlined into dist/mcp/
+ "../prompts",
+ // Dev: /src/mcp/prompts/definitions (next to loader.ts)
+ "./definitions"
+ ].map((relPath) => path.resolve(__dirname, relPath));
+ for (const dir of candidates) {
+ if (fs.existsSync(dir)) {
+ const files = fs.readdirSync(dir);
+ if (files.some((f) => f.endsWith(".md"))) {
+ return dir;
+ }
+ }
+ }
+ return path.resolve(__dirname, "./definitions");
+ }
+ var PROMPT_DIR = findPromptDir();
+ function listPromptFiles() {
+ if (!fs.existsSync(PROMPT_DIR)) return [];
+ return fs.readdirSync(PROMPT_DIR).filter((file) => file.endsWith(".md")).map((file) => file.replace(/\.md$/, "")).sort();
+ }
+ function loadPromptFromMarkdown(name) {
+ const filePath = path.join(PROMPT_DIR, `${name}.md`);
+ if (!fs.existsSync(filePath)) {
+ throw new Error(`Prompt file not found: ${filePath}`);
+ }
+ const fileContent = fs.readFileSync(filePath, "utf-8");
+ const { data, content } = matter(fileContent);
+ return {
+ name: data.name || name,
+ description: data.description || "",
+ arguments: data.arguments || [],
+ agent: data.agent,
+ content: content.trim()
+ };
+ }
+ function findServerInstructionsDir() {
+ const candidates = [
+ // Production if chunked into dist/
+ "./prompts/server",
+ // Production if inlined into dist/mcp/
+ "../prompts/server",
+ // Dev: /src/mcp/prompts/server (next to loader.ts)
+ "./server"
+ ].map((relPath) => path.resolve(__dirname, relPath));
+ for (const dir of candidates) {
+ if (fs.existsSync(dir)) {
+ const filePath = path.join(dir, "instructions.md");
+ if (fs.existsSync(filePath)) {
+ return dir;
+ }
+ }
+ }
+ return path.resolve(__dirname, "./server");
+ }
+ var SERVER_DIR = findServerInstructionsDir();
+ function loadServerInstructions() {
+ const filePath = path.join(SERVER_DIR, "instructions.md");
+ if (!fs.existsSync(filePath)) {
+ throw new Error(`Server instructions file not found: ${filePath}`);
+ }
+ const fileContent = fs.readFileSync(filePath, "utf-8");
+ const { content } = matter(fileContent);
+ return content.trim();
+ }
+
+ // src/mcp/capabilities.ts
+ var __dirname2 = path2.dirname(fileURLToPath2(import.meta.url));
  var pkgVersion = "0.1.0";
- if ("0.10.1") {
- pkgVersion = "0.10.1";
+ if ("0.10.3") {
+ pkgVersion = "0.10.3";
  } else {
- let searchDir = __dirname;
+ let searchDir = __dirname2;
  for (let i = 0; i < 5; i++) {
- const candidate = path.join(searchDir, "package.json");
+ const candidate = path2.join(searchDir, "package.json");
  try {
  if (fs.existsSync(candidate)) {
  const pkg = JSON.parse(fs.readFileSync(candidate, "utf8"));
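The new loader reads each prompt definition with gray-matter, splitting YAML frontmatter from the markdown body. A minimal sketch of that parse step on a hypothetical prompt file; the `{ data, content }` return shape is gray-matter's documented API:

```ts
import matter from "gray-matter";

// Hypothetical prompt file mirroring the frontmatter fields
// loadPromptFromMarkdown reads (name, description, agent).
const file = [
  "---",
  "name: fix-suggestion",
  "description: Targeted fix with before/after code and test case.",
  "agent: Debugging Expert",
  "---",
  "Provide precise, minimal fix for confirmed bug.",
].join("\n");

const { data, content } = matter(file);
// data.name === "fix-suggestion"; data.agent === "Debugging Expert"
// content.trim() === "Provide precise, minimal fix for confirmed bug."
```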
@@ -19,57 +97,14 @@ if ("0.10.1") {
  }
  } catch {
  }
- searchDir = path.dirname(searchDir);
+ searchDir = path2.dirname(searchDir);
  }
  }
  var MCP_PROTOCOL_VERSION = "2025-03-26";
- var SERVER_INSTRUCTIONS = `
- Local Memory MCP \u2014 persistent memory, task coordination, and coding standards for AI agents.
-
- ## When to use this server
- Use at the START of every session and before any implementation work:
- 1. Call \`task-list\` to sync active/pending tasks for the current repository.
- 2. Call \`handoff-list\` to check pending context transfers. Close stale handoffs with \`handoff-update\`.
- 3. Call \`memory-search\` and \`memory-synthesize\` to hydrate architectural context before coding.
- 4. Call \`standard-search\` before any code edit, test edit, refactor, migration, or implementation decision. This is mandatory even for small tasks; use the task intent, affected files, inferred language, stack, and repo as filters. If no relevant standards are returned, continue and state that no applicable standards were found.
-
- ## Core Workflows
-
- **Memory**: \`memory-search\` \u2192 \`memory-detail\` \u2192 \`memory-store\` / \`memory-update\`
- - Store only durable knowledge (architecture, patterns, decisions, fixes).
- - Use \`memory-acknowledge\` after generating code from memory results.
- - Global scope only for cross-repo rules; prefer repo-specific scope.
-
- **Tasks**: \`task-list\` \u2192 \`task-claim\` \u2192 \`task-update\` (in_progress \u2192 completed)
- - Register planned steps via \`task-create\` before execution.
- - Never skip intermediate \`in_progress\` state before \`completed\`.
- - Completing a task auto-releases claims and expires linked handoffs.
-
- **Standards**: \`standard-search\` \u2192 \`standard-store\`
- - \`standard-search\` is the pre-implementation gate for code, tests, refactors, migrations, and implementation decisions.
- - One rule per entry. Treat as normative implementation contracts, not docs summaries.
-
- **Handoffs/Claims**: \`handoff-list\` \u2192 \`handoff-create\` / \`handoff-update\` | \`task-claim\` / \`claim-release\`
- - Create handoffs only for unfinished work with concrete next owner or next steps.
- - Do NOT create handoffs as completion summaries \u2014 put those on \`task-update\` comments.
-
- ## Available Prompts (invoke as slash commands)
- - \`session-planner\` \u2014 orient and plan at session start
- - \`task-memory-executor\` \u2014 execute tasks with memory and standard enforcement
- - \`senior-code-review\` \u2014 full code review against stored standards
- - \`memory-guided-review\` \u2014 review using project memory as context
- - \`architecture-design\` \u2014 architectural planning and ADR generation
- - \`technical-planning\` \u2014 feature planning with task decomposition
- - \`root-cause-analysis\` \u2014 structured bug / incident investigation
- - \`fix-suggestion\` \u2014 propose and validate fixes
- - \`security-triage\` \u2014 security risk assessment
- - \`learning-retrospective\` \u2014 capture lessons and update memory
- - \`documentation-sync\` \u2014 sync docs with current codebase state
- - \`project-briefing\` \u2014 generate repository briefing from memory
- `.trim();
+ var SERVER_INSTRUCTIONS = loadServerInstructions();
  var CAPABILITIES = {
  serverInfo: {
- name: "mcp-memory-local",
+ name: "local-memory-mcp",
  version: pkgVersion,
  instructions: SERVER_INSTRUCTIONS
  },
@@ -90,7 +125,7 @@ var CAPABILITIES = {
  };

  // src/mcp/utils/logger.ts
- import fs from "fs";
+ import fs2 from "fs";
  var LEVELS = {
  debug: 0,
  info: 1,
@@ -210,11 +245,11 @@ function addLogSink(sink) {
  }
  var LOG_LEVEL_VALUES = Object.keys(LEVELS);
  function createFileSink(logDir, maxFiles = 5) {
- fs.mkdirSync(logDir, { recursive: true });
- const existing = fs.readdirSync(logDir).filter((f) => f.startsWith("mcp-") && f.endsWith(".log")).sort();
+ fs2.mkdirSync(logDir, { recursive: true });
+ const existing = fs2.readdirSync(logDir).filter((f) => f.startsWith("mcp-") && f.endsWith(".log")).sort();
  while (existing.length >= maxFiles) {
  try {
- fs.unlinkSync(`${logDir}/${existing.shift()}`);
+ fs2.unlinkSync(`${logDir}/${existing.shift()}`);
  } catch {
  }
  }
@@ -224,7 +259,7 @@ function createFileSink(logDir, maxFiles = 5) {
  const line = `${(/* @__PURE__ */ new Date()).toISOString()} [${payload.level.toUpperCase()}] [pid:${process.pid}] ${JSON.stringify(payload.data)}
  `;
  try {
- fs.appendFileSync(logFile, line);
+ fs2.appendFileSync(logFile, line);
  } catch {
  }
  };
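A sketch of how the rotating file sink might be wired up, assuming createFileSink returns a sink compatible with addLogSink (consistent with the signatures shown above; the log directory is illustrative):

```ts
import os from "os";
import path from "path";

// Illustrative log directory; createFileSink keeps at most 5 mcp-*.log
// files, unlinking the oldest before creating a new one.
const logDir = path.join(os.homedir(), ".local-memory-mcp", "logs");
addLogSink(createFileSink(logDir, 5));
```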
@@ -232,8 +267,8 @@ function createFileSink(logDir, maxFiles = 5) {

  // src/mcp/storage/sqlite.ts
  import Database from "better-sqlite3";
- import path3 from "path";
- import fs3 from "fs";
+ import path4 from "path";
+ import fs4 from "fs";
  import os from "os";

  // src/mcp/storage/migrations.ts
@@ -2725,8 +2760,8 @@ var HandoffEntity = class extends BaseEntity {

  // src/mcp/storage/write-lock.ts
  import lockfile from "proper-lockfile";
- import path2 from "path";
- import fs2 from "fs";
+ import path3 from "path";
+ import fs3 from "fs";
  var LOCK_STALE_MS = 3e4;
  var LOCK_RETRY_DELAY_MS = 200;
  var LOCK_RETRY_COUNT = 250;
@@ -2735,9 +2770,9 @@ var WriteLock = class {
  locked = false;
  constructor(dbPath) {
  this.lockTarget = dbPath;
- if (!fs2.existsSync(dbPath)) {
- fs2.mkdirSync(path2.dirname(dbPath), { recursive: true });
- fs2.writeFileSync(dbPath, "");
+ if (!fs3.existsSync(dbPath)) {
+ fs3.mkdirSync(path3.dirname(dbPath), { recursive: true });
+ fs3.writeFileSync(dbPath, "");
  }
  }
  /**
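The constructor pre-creates the lock target because proper-lockfile refuses to lock a file that does not exist. A minimal sketch of acquiring and releasing a lock with the LOCK_* constants above; `stale` and `retries` are proper-lockfile's documented options, and the wrapper itself is assumed:

```ts
import lockfile from "proper-lockfile";

// Hypothetical wrapper: serialize writes to the SQLite file.
async function withWriteLock(dbPath: string, fn: () => Promise<void>): Promise<void> {
  const release = await lockfile.lock(dbPath, {
    stale: 30_000,                                                // LOCK_STALE_MS (3e4)
    retries: { retries: 250, minTimeout: 200, maxTimeout: 200 },  // LOCK_RETRY_COUNT / LOCK_RETRY_DELAY_MS
  });
  try {
    await fn();
  } finally {
    await release(); // always release, even if fn throws
  }
}
```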
@@ -2789,13 +2824,13 @@ var WriteLock = class {
  // src/mcp/storage/sqlite.ts
  function resolveDbPath() {
  if (process.env.MEMORY_DB_PATH) return process.env.MEMORY_DB_PATH;
- const standardConfigDir = process.platform === "win32" ? path3.join(os.homedir(), ".local-memory-mcp") : process.platform === "darwin" ? path3.join(os.homedir(), "Library", "Application Support", "local-memory-mcp") : path3.join(os.homedir(), ".config", "local-memory-mcp");
- const standardPath = path3.join(standardConfigDir, "memory.db");
- if (fs3.existsSync(standardPath)) return standardPath;
- const legacyPath = path3.join(os.homedir(), ".config", "local-memory-mcp", "memory.db");
- if (fs3.existsSync(legacyPath)) return legacyPath;
- const localCwdFile = path3.join(process.cwd(), "storage", "memory.db");
- if (fs3.existsSync(localCwdFile)) return localCwdFile;
+ const standardConfigDir = process.platform === "win32" ? path4.join(os.homedir(), ".local-memory-mcp") : process.platform === "darwin" ? path4.join(os.homedir(), "Library", "Application Support", "local-memory-mcp") : path4.join(os.homedir(), ".config", "local-memory-mcp");
+ const standardPath = path4.join(standardConfigDir, "memory.db");
+ if (fs4.existsSync(standardPath)) return standardPath;
+ const legacyPath = path4.join(os.homedir(), ".config", "local-memory-mcp", "memory.db");
+ if (fs4.existsSync(legacyPath)) return legacyPath;
+ const localCwdFile = path4.join(process.cwd(), "storage", "memory.db");
+ if (fs4.existsSync(localCwdFile)) return localCwdFile;
  return standardPath;
  }
  var DB_PATH = resolveDbPath();
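resolveDbPath() checks locations in a fixed order and returns the first hit. Summarized below, with the one operational caveat that DB_PATH is computed once at module load:

```ts
// 1. process.env.MEMORY_DB_PATH            (explicit override, always wins)
// 2. platform config dir + memory.db:
//      win32:  ~/.local-memory-mcp
//      darwin: ~/Library/Application Support/local-memory-mcp
//      linux:  ~/.config/local-memory-mcp
// 3. legacy:  ~/.config/local-memory-mcp/memory.db
// 4. ./storage/memory.db in the current working directory
// 5. fallback: the standard path from step 2, even if it does not exist yet
//
// Because `var DB_PATH = resolveDbPath()` runs at import time, the override
// must be in the environment before the process starts, e.g.:
//   MEMORY_DB_PATH=/tmp/memory-test.db node dist/mcp/server.js
```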
@@ -2814,9 +2849,9 @@ var SQLiteStore = class _SQLiteStore {
  const finalPath = dbPath ?? DB_PATH;
  this.dbPathInstance = finalPath;
  if (finalPath !== ":memory:") {
- const dbDir = path3.dirname(finalPath);
- if (!fs3.existsSync(dbDir)) {
- fs3.mkdirSync(dbDir, { recursive: true });
+ const dbDir = path4.dirname(finalPath);
+ if (!fs4.existsSync(dbDir)) {
+ fs4.mkdirSync(dbDir, { recursive: true });
  }
  }
  this.db = new Database(finalPath);
@@ -2871,12 +2906,12 @@ var SQLiteStore = class _SQLiteStore {
  */
  _attemptRecovery(dbPath) {
  const backupPath = dbPath + ".backup";
- if (fs3.existsSync(backupPath)) {
+ if (fs4.existsSync(backupPath)) {
  logger.warn("[SQLiteStore] Attempting recovery from backup", { backupPath });
  try {
  const corruptPath = `${dbPath}.corrupt_${(/* @__PURE__ */ new Date()).toISOString().replace(/[:.]/g, "").slice(0, 15)}`;
- fs3.copyFileSync(dbPath, corruptPath);
- fs3.copyFileSync(backupPath, dbPath);
+ fs4.copyFileSync(dbPath, corruptPath);
+ fs4.copyFileSync(backupPath, dbPath);
  logger.warn("[SQLiteStore] Recovery successful. Corrupt file saved to", { corruptPath });
  } catch (err) {
  logger.error("[SQLiteStore] Recovery failed", { error: String(err) });
@@ -2894,7 +2929,7 @@ var SQLiteStore = class _SQLiteStore {
  try {
  this.db.pragma("wal_checkpoint(PASSIVE)");
  const backupPath = this.dbPathInstance + ".backup";
- fs3.copyFileSync(this.dbPathInstance, backupPath);
+ fs4.copyFileSync(this.dbPathInstance, backupPath);
  } catch (err) {
  logger.warn("[SQLiteStore] Backup failed", { error: String(err) });
  }
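Together these two hunks implement a snapshot-and-restore cycle: healthy checkpoints copy the database to a .backup sibling, and _attemptRecovery rolls back to it while preserving the corrupt file. A compact sketch of the restore half, with names taken from the diff:

```ts
import fs from "fs";

// Sketch of the recovery step above: keep the corrupt file for inspection,
// then restore the last known-good snapshot.
function restoreFromBackup(dbPath: string): boolean {
  const backupPath = dbPath + ".backup";
  if (!fs.existsSync(backupPath)) return false;
  const stamp = new Date().toISOString().replace(/[:.]/g, "").slice(0, 15);
  fs.copyFileSync(dbPath, `${dbPath}.corrupt_${stamp}`); // preserve evidence
  fs.copyFileSync(backupPath, dbPath);                   // roll back to snapshot
  return true;
}
```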
@@ -3017,8 +3052,8 @@ var RealVectorStore = class {
  };

  // src/mcp/session.ts
- import path4 from "path";
- import { fileURLToPath as fileURLToPath2 } from "url";
+ import path5 from "path";
+ import { fileURLToPath as fileURLToPath3 } from "url";
  function createSessionContext() {
  return {
  roots: [],
@@ -3083,7 +3118,7 @@ function getFilesystemRoots(session) {
  for (const root of session.roots) {
  if (!root.uri.startsWith("file://")) continue;
  try {
- resolved.push(path4.resolve(fileURLToPath2(root.uri)));
+ resolved.push(path5.resolve(fileURLToPath3(root.uri)));
  } catch {
  }
  }
@@ -3092,19 +3127,19 @@ function getFilesystemRoots(session) {
  function isPathWithinRoots(targetPath, session) {
  const roots = getFilesystemRoots(session);
  if (roots.length === 0) return true;
- const normalizedTarget = path4.resolve(targetPath);
+ const normalizedTarget = path5.resolve(targetPath);
  return roots.some((rootPath) => {
- const relative = path4.relative(rootPath, normalizedTarget);
- return relative === "" || !relative.startsWith("..") && !path4.isAbsolute(relative);
+ const relative = path5.relative(rootPath, normalizedTarget);
+ return relative === "" || !relative.startsWith("..") && !path5.isAbsolute(relative);
  });
  }
  function findContainingRoot(targetPath, session) {
  const roots = getFilesystemRoots(session);
  if (roots.length === 0) return null;
- const normalizedTarget = path4.resolve(targetPath);
+ const normalizedTarget = path5.resolve(targetPath);
  for (const rootPath of roots) {
- const relative = path4.relative(rootPath, normalizedTarget);
- if (relative === "" || !relative.startsWith("..") && !path4.isAbsolute(relative)) {
+ const relative = path5.relative(rootPath, normalizedTarget);
+ if (relative === "" || !relative.startsWith("..") && !path5.isAbsolute(relative)) {
  return rootPath;
  }
  }
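The containment test leans on path.relative: a target inside a root yields a relative path that neither starts with ".." nor is absolute. Illustrative calls, with the session roots shape assumed from createSessionContext and getFilesystemRoots above:

```ts
const session = createSessionContext();
session.roots = [{ uri: "file:///home/user/project" }]; // illustrative root

isPathWithinRoots("/home/user/project/src/index.ts", session);  // true
isPathWithinRoots("/home/user/project/../elsewhere", session);  // false: resolves outside
findContainingRoot("/home/user/project/src/index.ts", session); // "/home/user/project"
```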
@@ -3113,7 +3148,7 @@ function findContainingRoot(targetPath, session) {
  function inferRepoFromSession(session) {
  const roots = getFilesystemRoots(session);
  if (roots.length === 1) {
- return path4.basename(roots[0]);
+ return path5.basename(roots[0]);
  }
  return void 0;
  }
@@ -3228,7 +3263,7 @@ var SingleTaskCreateSchema = z.object({
  doc_path: z.string().optional(),
  tags: z.array(z.string()).optional(),
  metadata: z.record(z.string(), z.any()).optional(),
- parent_id: z.string().uuid().optional(),
+ parent_id: z.string().optional(),
  depends_on: z.string().uuid().optional(),
  est_tokens: z.number().int().min(0).optional()
  });
@@ -3246,7 +3281,7 @@ var TaskCreateSchema = z.object({
  doc_path: z.string().optional(),
  tags: z.array(z.string()).optional(),
  metadata: z.record(z.string(), z.any()).optional(),
- parent_id: z.string().uuid().optional(),
+ parent_id: z.string().optional(),
  depends_on: z.string().uuid().optional(),
  est_tokens: z.number().int().min(0).optional(),
  // Allow bulk tasks
@@ -3280,7 +3315,7 @@ var TaskUpdateSchema = z.object({
  doc_path: z.string().optional(),
  tags: z.array(z.string()).optional(),
  metadata: z.record(z.string(), z.any()).optional(),
- parent_id: z.string().uuid().optional(),
+ parent_id: z.string().optional(),
  depends_on: z.string().uuid().optional(),
  est_tokens: z.number().int().min(0).optional(),
  force: z.boolean().optional(),
@@ -4080,7 +4115,7 @@ var TOOL_DEFINITIONS = [
  doc_path: { type: "string" },
  tags: { type: "array", items: { type: "string" } },
  metadata: { type: "object" },
- parent_id: { type: "string", format: "uuid" },
+ parent_id: { type: "string", description: "Optional parent task ID (UUID) or parent task code (e.g. TASK-001). Resolved to UUID before storing." },
  depends_on: { type: "string", format: "uuid" },
  est_tokens: { type: "number", minimum: 0, description: "Estimated tokens budget for this task" },
  tasks: {
@@ -4105,7 +4140,7 @@ var TOOL_DEFINITIONS = [
  doc_path: { type: "string" },
  tags: { type: "array", items: { type: "string" } },
  metadata: { type: "object" },
- parent_id: { type: "string", format: "uuid" },
+ parent_id: { type: "string", description: "Optional parent task ID (UUID) or parent task code (e.g. TASK-001). Resolved to UUID before storing." },
  depends_on: { type: "string", format: "uuid" },
  est_tokens: { type: "number", minimum: 0 }
  },
@@ -4174,7 +4209,7 @@ var TOOL_DEFINITIONS = [
  doc_path: { type: "string" },
  tags: { type: "array", items: { type: "string" } },
  metadata: { type: "object" },
- parent_id: { type: "string", format: "uuid" },
+ parent_id: { type: "string", description: "Optional parent task ID (UUID) or parent task code (e.g. TASK-001). Resolved to UUID before storing." },
  depends_on: { type: "string", format: "uuid" },
  est_tokens: {
  type: "number",
@@ -4253,7 +4288,7 @@ var TOOL_DEFINITIONS = [
  properties: {
  repo: {
  type: "string",
- description: "Repository name"
+ description: "Repository name (required)"
  },
  status: {
  type: "string",
@@ -5208,53 +5243,6 @@ function invalidCompletionParams(message) {
  return error;
  }

- // src/mcp/prompts/loader.ts
- import fs4 from "fs";
- import path5 from "path";
- import { fileURLToPath as fileURLToPath3 } from "url";
- import matter from "gray-matter";
- var __filename = fileURLToPath3(import.meta.url);
- var __dirname2 = path5.dirname(__filename);
- function findPromptDir() {
- const candidates = [
- // Production if chunked into dist/
- "./prompts",
- // Production if inlined into dist/mcp/
- "../prompts",
- // Dev: /src/mcp/prompts/definitions (next to loader.ts)
- "./definitions"
- ].map((relPath) => path5.resolve(__dirname2, relPath));
- for (const dir of candidates) {
- if (fs4.existsSync(dir)) {
- const files = fs4.readdirSync(dir);
- if (files.some((f) => f.endsWith(".md"))) {
- return dir;
- }
- }
- }
- return path5.resolve(__dirname2, "./definitions");
- }
- var PROMPT_DIR = findPromptDir();
- function listPromptFiles() {
- if (!fs4.existsSync(PROMPT_DIR)) return [];
- return fs4.readdirSync(PROMPT_DIR).filter((file) => file.endsWith(".md")).map((file) => file.replace(/\.md$/, "")).sort();
- }
- function loadPromptFromMarkdown(name) {
- const filePath = path5.join(PROMPT_DIR, `${name}.md`);
- if (!fs4.existsSync(filePath)) {
- throw new Error(`Prompt file not found: ${filePath}`);
- }
- const fileContent = fs4.readFileSync(filePath, "utf-8");
- const { data, content } = matter(fileContent);
- return {
- name: data.name || name,
- description: data.description || "",
- arguments: data.arguments || [],
- agent: data.agent,
- content: content.trim()
- };
- }
-
  // src/mcp/prompts/registry.ts
  function createPromptDefinition(loaded) {
  return {
@@ -10,7 +10,7 @@ import {
  createFileSink,
  listResources,
  logger
- } from "../chunk-Z6FENX4V.js";
+ } from "../chunk-HXADESDJ.js";

  // src/dashboard/server.ts
  import express from "express";
@@ -57,7 +57,7 @@ import {
  toContextSlug,
  updateSessionFromInitialize,
  updateSessionRoots
- } from "../chunk-Z6FENX4V.js";
+ } from "../chunk-HXADESDJ.js";

  // src/mcp/server.ts
  import readline from "readline";
@@ -739,6 +739,14 @@ function capitalize2(str) {

  // src/mcp/tools/task.manage.ts
  import { randomUUID as randomUUID2 } from "crypto";
+ var UUID_REGEX = /^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/i;
+ function resolveParentId(value, repo, storage) {
+ if (!value) return null;
+ if (UUID_REGEX.test(value)) return value;
+ const parent = storage.tasks.getTaskByCode(repo, value);
+ if (!parent) throw new Error(`parent_id: task with code '${value}' not found in repo '${repo}'`);
+ return parent.id;
+ }
  function describeTaskListFilter(status) {
  if (!status) return "active";
  if (status === "all") return "all";
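This helper is the behavioral change behind relaxing parent_id from z.string().uuid() to z.string() in the schemas above: callers may now pass either a raw UUID or a human-readable task code. Illustrative calls, assuming a storage object exposing the tasks.getTaskByCode method referenced in the diff (the repo name and IDs are hypothetical):

```ts
// A UUID passes through unchanged (matches UUID_REGEX).
resolveParentId("3f2c1d8e-9a4b-4c6d-8e2f-1a2b3c4d5e6f", "my-repo", storage);

// A task code is looked up via getTaskByCode and swapped for the parent's
// UUID; throws if no task with that code exists in the repo.
resolveParentId("TASK-001", "my-repo", storage);

// A missing value resolves to null.
resolveParentId(undefined, "my-repo", storage);
```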
@@ -932,7 +940,7 @@ async function handleTaskCreate(args, storage) {
  if (normalizedStatus === "pending") {
  if (initialStats.todo + pendingInRequestCount >= 10) {
  throw new Error(
- `Cannot create task '${taskData.task_code}' as 'pending'. Maximum of 10 pending tasks reached.`
+ `Cannot create task '${taskData.task_code}' as 'pending'. Maximum of 10 pending tasks reached. Please use status 'backlog' for new tasks instead.`
  );
  }
  }
@@ -962,7 +970,7 @@ async function handleTaskCreate(args, storage) {
  est_tokens: taskData.est_tokens ?? 0,
  tags,
  metadata: taskData.metadata || {},
- parent_id: taskData.parent_id || null,
+ parent_id: resolveParentId(taskData.parent_id, repo, storage),
  depends_on: taskData.depends_on || null
  };
  storage.tasks.insertTask(task2);
@@ -1005,7 +1013,7 @@ async function handleTaskCreate(args, storage) {
  if (status === "pending") {
  const stats = storage.tasks.getTaskStats(repo);
  if (stats.todo >= 10) {
- throw new Error(`Cannot create task as 'pending'. Maximum of 10 pending tasks reached.`);
+ throw new Error(`Cannot create task as 'pending'. Maximum of 10 pending tasks reached. Please use status 'backlog' for new tasks instead.`);
  }
  }
  const taskId = randomUUID2();
@@ -1036,7 +1044,7 @@ async function handleTaskCreate(args, storage) {
  est_tokens: est_tokens ?? 0,
  tags: finalTags,
  metadata: metadata || {},
- parent_id: parent_id || null,
+ parent_id: resolveParentId(parent_id, repo, storage),
  depends_on: depends_on || null
  };
  storage.tasks.insertTask(task);
@@ -1202,6 +1210,9 @@ async function handleTaskUpdate(args, storage, vectors2) {
  throw new Error(`Duplicate task_code: '${updates.task_code}' already exists`);
  }
  const finalUpdates = { ...updates };
+ if (updates.parent_id !== void 0) {
+ finalUpdates.parent_id = resolveParentId(updates.parent_id, repo, storage);
+ }
  if (updates.phase !== void 0 || updates.tags !== void 0) {
  let currentTags = updates.tags || existingTask.tags || [];
  currentTags = currentTags.filter((t) => !t.startsWith("phase:"));
@@ -3,17 +3,21 @@ name: architecture-design
  description: Plan system architecture, component layout, and data flow
  arguments:
  - name: tech_stack
- description: Technology stack
- required: true
+ description: Technology stack. Optional — auto-detected from repo package files, language, and active task tags if omitted.
+ required: false
  - name: requirements
- description: Key requirements
- required: true
+ description: Key requirements. Optional — inferred from active task description, pending handoff, or recent conversation if omitted.
+ required: false
  agent: System Architect
  ---
- Design system architecture for repository.
+ ## 0. CONTEXT RESOLUTION
+ - **tech_stack**: If provided, use directly. If omitted — detect from package.json, pyproject.toml, Gemfile, or repo file extensions.
+ - **requirements**: If provided, use directly. If omitted — extract from active `in_progress` task description, pending handoff context, or recent conversation.

- Stack: {{tech_stack}}
- Requirements: {{requirements}}
+ Design system architecture for the active repository.
+
+ Stack: (resolved above)
+ Requirements: (resolved above)

  Output:
  1. **Component Diagram**: Blocks & responsibilities.
@@ -3,11 +3,10 @@ name: create-task
  description: Create structured, atomic tasks in Local Memory MCP.
  arguments:
  - name: instruction
- description: Directive to analyze and break into tasks.
- required: true
+ description: Directive to analyze and break into tasks. Optional — derived from active task, pending handoff, or recent conversation if omitted.
+ required: false
  agent: Task Planner
  ---
- # Skill: create-task (Task Creation Orchestrator)

  ## 🚫 FORBIDDEN: NON-EXECUTION
  DO NOT edit/create/delete files, run commands, or implement code.
@@ -3,14 +3,15 @@ name: csl-from-docs
  description: Create atomic CSL coding standards entries from a local file or directory path.
  arguments:
  - name: path
- description: Local path (file or directory) containing documentation or standards.
- required: true
+ description: Local path (file or directory) containing documentation or standards. Optional — defaults to docs/, README, or prompts definitions directory of the active repo if omitted.
+ required: false
  agent: Documentation Processor
  ---
- Fetch and convert local documentation from the provided path into atomic CSL (Coding Standards Library) entries for the coding_standards entity.
+ ## 0. CONTEXT RESOLUTION
+ - **path**: If provided, use directly. If omitted — default to `docs/`, `README.md`, or `src/mcp/prompts/definitions/` in the active repo root.
+ - **current_repo**: Auto-detect from git remote or active workspace context.

- Path: {{path}}
- Current repo: {{current_repo}}
+ Fetch and convert local documentation from the resolved path into atomic CSL (Coding Standards Library) entries.

  Goal:
  - Analyze the provided path.
@@ -3,14 +3,15 @@ name: csl-scrapper
  description: Scrape trusted documentation from a URL into atomic CSL coding standards entries.
  arguments:
  - name: source_url
- description: Canonical URL for the documentation source to scrape.
- required: true
+ description: Canonical URL for the documentation source to scrape. Optional — if omitted, use the most recently referenced URL from conversation context.
+ required: false
  agent: Documentation Scraper
  ---
- Fetch and convert trusted documentation from the provided URL into atomic CSL (Coding Standards Library) entries for the coding_standards entity.
+ ## 0. CONTEXT RESOLUTION
+ - **source_url**: Use `{{source_url}}` if provided. If omitted — extract from the most recently mentioned URL in conversation context or active task description.
+ - **current_repo**: Auto-detect from git remote or active workspace context.

- Source URL: {{source_url}}
- Current repo: {{current_repo}}
+ Fetch and convert trusted documentation from the resolved URL into atomic CSL (Coding Standards Library) entries.

  Goal:
  - Use the web_fetch tool (if available) to retrieve the content of the provided Source URL.
@@ -2,35 +2,32 @@
  name: export-task-to-github
  description: Export local tasks to GitHub Issues
  arguments:
- - name: owner
- description: GitHub repo owner
- required: true
- - name: repo
- description: GitHub repo name
- required: true
  - name: task_id
- description: Local task ID
- required: true
+ description: Local task ID. Optional — if omitted, all tasks in the active repo are exported.
+ required: false
  agent: Integration Architect
  ---
- # Skill: export-task-to-github

- ## 1. RETRIEVE
- 1. **Fetch**: Call `task-detail` for `task_id`.
- 2. **Verify**: Ensure title/description exist. Use `memory-search` for gaps.
+ ## 1. IDENTIFY ACTIVE PROJECT
+ 1. **Detect**: Get repo name and owner from git remote (e.g. `git remote get-url origin`) or active workspace context. Parse `owner` and `repo` from the remote URL automatically.
+ 2. **Verify**: Confirm the detected `owner`/`repo` before proceeding.

- ## 2. SYNC CHECK
- 1. **Search**: Use `search_issues` for `task_code`.
+ ## 2. RETRIEVE
+ 1. **Scope**: If `task_id` provided — fetch that single task via `task-detail`. If omitted — call `task-list` (status: `pending,in_progress,completed`) to get all tasks in the active repo, then process each.
+ 2. **Verify**: Ensure title/description exist per task. Use `memory-search` for gaps.
+
+ ## 3. SYNC CHECK
+ 1. **Search**: Use `search_issues` for `task_code` scoped to detected `owner`/`repo`.
  2. **De-duplicate**: If issue exists, update local task `metadata` with URL. DO NOT re-create.

- ## 3. CREATE ISSUE
+ ## 4. CREATE ISSUE
  If new:
- - **Write**: Use `issue_write` (method: 'create').
+ - **Write**: Use `issue_write` (method: 'create') with detected `owner`/`repo`.
  - **Content**: Match local title/body exactly.
  - **Traceability**: Append `task_code` and `task_id` to body.
  - **Comments**: Post local comments via `add_issue_comment`.

- ## 4. LINK
+ ## 5. LINK
  - **Update**: Call `task-update`.
  - **Metadata**: Add GitHub URL.
  - **Comment**: "Exported to GitHub Issue #X".
@@ -3,21 +3,24 @@ name: fix-suggestion
  description: Targeted fix with before/after code and test case.
  arguments:
  - name: tech_stack
- description: Target tech stack.
- required: true
+ description: Target tech stack. Optional — inferred from repo/context if omitted.
+ required: false
  - name: bug_description
- description: Bug behavior.
- required: true
+ description: Bug behavior. Optional — inferred from active conversation or task context if omitted.
+ required: false
  - name: root_cause
- description: Identified root cause.
- required: true
+ description: Identified root cause. Optional — inferred from recent error/log context if omitted.
+ required: false
  agent: Debugging Expert
  ---
- Provide precise, minimal fix for confirmed bug.

- Stack: {{tech_stack}}
- Bug: {{bug_description}}
- Cause: {{root_cause}}
+ ## 0. CONTEXT RESOLUTION
+ Resolve missing arguments from available context before proceeding:
+ - **tech_stack**: Detect from repo language, package files, or active task tags. Fallback: ask agent to infer from open files.
+ - **bug_description**: Extract from active task description, recent conversation, or error logs. Fallback: describe observable broken behavior.
+ - **root_cause**: Extract from recent analysis, error traces, or `memory-search` results. Fallback: state "unknown — investigation required".
+
+ Provide precise, minimal fix for confirmed bug.

  Output:
  1. **Explanation**: Why it happens & how fix works.
@@ -4,7 +4,6 @@ description: Import GitHub Issues as local tasks.
  arguments: []
  agent: Integration Scout
  ---
- # Skill: import-github-issues

  ## 1. FETCH
  - **Primary**: Use `github-mcp-server` to list open issues.
@@ -3,11 +3,15 @@ name: learning-retrospective
  description: Harvest knowledge from completed work.
  arguments:
  - name: task_id
- description: ID of completed task.
+ description: ID or code of completed task. Optional — defaults to most recently completed task in the active repo.
  required: false
  agent: Knowledge Harvester
  ---
- Extract durable knowledge from task {{task_id}} for repository.
+ ## 0. CONTEXT RESOLUTION
+ 1. **Repo**: Auto-detect from git remote or active workspace context. All MCP calls MUST be scoped to this repo.
+ 2. **Task**: If `task_id` provided — use it directly. If omitted — call `task-list` (status: `completed`, limit: 1, ordered by updated_at desc) to get the most recently completed task.
+
+ Extract durable knowledge from the resolved task for the active repository.

  Identify and `memory-store`:
  1. **Mistakes**: Hard-to-find bugs or environment quirks.
@@ -3,11 +3,14 @@ name: memory-guided-review
  description: Review code for compliance with stored decisions.
  arguments:
  - name: file_path
- description: File to review.
- required: true
+ description: File to review. Optional — if omitted, review the currently open/active file in the workspace, or all recently modified files.
+ required: false
  agent: Code Auditor
  ---
- Audit {{file_path}} against stored project knowledge.
+ ## 0. CONTEXT RESOLUTION
+ - **file_path**: If provided, use it. If omitted — use the currently active/open file from workspace context, or list recently modified files via git and process them.
+
+ Audit the resolved file(s) against stored project knowledge.

  Steps:
  1. **Search Memory**: Call `memory-search` using `current_file_path='{{file_path}}'`.
@@ -3,17 +3,20 @@ name: review-and-audit
  description: Audit documentation against implementation; generate local tasks for gaps.
  arguments:
  - name: target
- description: Module, feature, or component to audit.
+ description: Module, feature, or component to audit. Optional — if omitted, audits all available documentation against the full implementation.
  required: false
  agent: Quality Auditor
  ---
- # Skill: review-and-audit (Audit Agent)
+
+ ## 0. CONTEXT RESOLUTION
+ - **Target**: If `target` provided — scope audit to that module/feature/component. If omitted — **fallback**: audit ALL existing documentation (README, docs/, prompts, schemas) against the full codebase implementation. Enumerate each doc file and compare with corresponding source.
+ - **Repo**: Auto-detect from git remote or active workspace context.

  ## 1. ANALYSIS
- 1. **Sequential Discovery**: Explore docs and code sequentially. NO parallel sub-agents.
- 2. **UX Audit**: Use `chrome-dev-tools` for visual, navigation, and responsiveness checks.
+ 1. **Discovery**: Explore docs and code. Coding sub-agents MAY be used for parallel file reading if the agent supports sub-agents. `chrome-devtools-mcp` MCP tools (direct tool calls) are **ALLOWED**. **FORBIDDEN**: spawning a `browser_subagent` — do NOT invoke the `browser_subagent` tool here. Distinction: MCP tool call = allowed; spawned browser agent process = forbidden.
+ 2. **UX Audit**: If target involves UI and the audit explicitly requires visual inspection, note it as a separate task — do NOT block the audit on it.
  3. **Reference Audit**: Check current tool/prompt/resource definitions through code or the dashboard Reference flow.
- 4. **Compare**: Match docs + code findings against live UI to find gaps/misalignments.
+ 4. **Compare**: Match docs + code findings to find gaps/misalignments.

  ## 🚫 FORBIDDEN: NON-EXECUTION
  DO NOT edit/create/delete files, run commands, or implement code.
@@ -3,22 +3,25 @@ name: review-and-post-issue
  description: Audit documentation against implementation; generate GitHub issues for gaps.
  arguments:
  - name: owner
- description: GitHub repo owner.
- required: true
+ description: GitHub repo owner. Optional — auto-detected from git remote if omitted.
+ required: false
  - name: repo
- description: GitHub repo name.
- required: true
+ description: GitHub repo name. Optional — auto-detected from git remote if omitted.
+ required: false
  - name: target
- description: Module, feature, or component to audit.
+ description: Module, feature, or component to audit. Optional — if omitted, audits all documentation against the full implementation.
  required: false
  agent: Quality Auditor
  ---
- # Skill: review-and-post-issue (Audit Agent)
+
+ ## 0. CONTEXT RESOLUTION
+ 1. **Owner/Repo**: Auto-detect from `git remote get-url origin` or active workspace context. Parse `owner` and `repo` from the remote URL. Verify before proceeding.
+ 2. **Target**: If `target` provided — scope to that module/feature. If omitted — **fallback**: audit ALL existing documentation against the full codebase.

  ## 1. ANALYSIS
- 1. **Sequential Discovery**: Explore docs and code sequentially. NO parallel sub-agents.
- 2. **UX Audit**: If applicable, use `chrome-dev-tools` for visual, navigation, and responsiveness checks.
- 3. **Compare**: Match findings against live UI to find gaps/misalignments.
+ 1. **Discovery**: Explore docs and code. Coding sub-agents MAY be used for parallel reading if the agent supports sub-agents. **FORBIDDEN: browser sub-agents**.
+ 2. **UX Audit**: If the target involves UI, note it as a separate GitHub issue — do NOT block audit on visual inspection.
+ 3. **Compare**: Match docs + code findings to identify gaps/misalignments.

  ## 🚫 FORBIDDEN: NON-EXECUTION
  DO NOT edit/create/delete files, run commands, or implement code.
@@ -3,21 +3,22 @@ name: root-cause-analysis
  description: 5-Why analysis to trace bug origins.
  arguments:
  - name: tech_stack
- description: Target tech stack.
- required: true
+ description: Target tech stack. Optional — auto-detected from repo/context if omitted.
+ required: false
  - name: bug_description
- description: Bug behavior.
- required: true
+ description: Bug behavior. Optional — inferred from active task or recent conversation if omitted.
+ required: false
  - name: symptoms
  description: Logs, errors, metrics.
  required: false
  agent: Diagnostic Lead
  ---
- Conduct root cause analysis for repository bug.
+ ## 0. CONTEXT RESOLUTION
+ - **tech_stack**: If provided, use directly. If omitted — detect from repo package files or active task tags.
+ - **bug_description**: If provided, use directly. If omitted — extract from active task description or recent error/log context.
+ - **symptoms**: Optional. Use if provided; otherwise infer from available logs or error traces.

- Stack: {{tech_stack}}
- Bug: {{bug_description}}
- Symptoms: {{symptoms}}
+ Conduct root cause analysis for the active repository bug.

  Output:
  1. **Symptom**: Technical problem restatement.
@@ -3,21 +3,22 @@ name: security-triage
  description: Assess vulnerability exploitability and prioritize fix.
  arguments:
  - name: tech_stack
- description: App stack.
- required: true
+ description: App stack. Optional — auto-detected from repo/context if omitted.
+ required: false
  - name: vulnerability_report
- description: Report details (CVE, SAST).
- required: true
+ description: Report details (CVE, SAST). Optional — extracted from active task description or recent conversation if omitted.
+ required: false
  - name: codebase_context
  description: Usage context.
  required: false
  agent: Security Engineer
  ---
- Triage vulnerability for repository.
+ ## 0. CONTEXT RESOLUTION
+ - **tech_stack**: If provided, use directly. If omitted — detect from repo package files, language, or active task tags.
+ - **vulnerability_report**: If provided, use directly. If omitted — extract from active task description, recent conversation, or attached SAST output.
+ - **codebase_context**: Optional. Use if provided.

- Stack: {{tech_stack}}
- Report: {{vulnerability_report}}
- Context: {{codebase_context}}
+ Triage the resolved vulnerability for the active repository.

  Output:
  1. **Classification**: Type, CVE, CVSS, vector.
@@ -3,17 +3,18 @@ name: senior-code-review
  description: Comprehensive production-readiness evaluation.
  arguments:
  - name: tech_stack
- description: Tech stack.
- required: true
+ description: Tech stack. Optional — auto-detected from repo package files, language, or active task tags if omitted.
+ required: false
  - name: context
  description: Production context (SLA, data, conventions).
  required: false
  agent: Principal Reviewer
  ---
- Perform production-readiness review for repository.
+ ## 0. CONTEXT RESOLUTION
+ - **tech_stack**: If provided, use directly. If omitted — detect from repo package files or language.
+ - **context**: Optional. Use if provided; otherwise infer from task description or recent conversation.

- Stack: {{tech_stack}}
- Context: {{context}}
+ Perform production-readiness review for the active repository.

  Audit Dimensions:
  1. **Errors**: Completeness & patterns.
@@ -0,0 +1,46 @@
+ ---
+ name: server-instructions
+ description: Main instructions for the MCP server
+ ---
+ Local Memory MCP — persistent memory, task coordination, and coding standards for AI agents.
+
+ ## When to use this server
+ Use at the START of every session and before any implementation work:
+ 1. Call `task-list` to sync active/pending tasks for the current repository.
+ 2. Call `handoff-list` to check pending context transfers. Close stale handoffs with `handoff-update`.
+ 3. Call `memory-search` and `memory-synthesize` to hydrate architectural context before coding.
+ 4. Call `standard-search` before any code edit, test edit, refactor, migration, or implementation decision. This is mandatory even for small tasks; use the task intent, affected files, inferred language, stack, and repo as filters. If no relevant standards are returned, continue and state that no applicable standards were found.
+
+ ## Core Workflows
+
+ **Memory**: `memory-search` → `memory-detail` → `memory-store` / `memory-update`
+ - Store only durable knowledge (architecture, patterns, decisions, fixes).
+ - Use `memory-acknowledge` after generating code from memory results.
+ - Global scope only for cross-repo rules; prefer repo-specific scope.
+
+ **Tasks**: `task-list` → `task-claim` → `task-update` (in_progress → completed)
+ - Register planned steps via `task-create` before execution.
+ - Never skip intermediate `in_progress` state before `completed`.
+ - Completing a task auto-releases claims and expires linked handoffs.
+
+ **Standards**: `standard-search` → `standard-store`
+ - `standard-search` is the pre-implementation gate for code, tests, refactors, migrations, and implementation decisions.
+ - One rule per entry. Treat as normative implementation contracts, not docs summaries.
+
+ **Handoffs/Claims**: `handoff-list` → `handoff-create` / `handoff-update` | `task-claim` / `claim-release`
+ - Create handoffs only for unfinished work with concrete next owner or next steps.
+ - Do NOT create handoffs as completion summaries — put those on `task-update` comments.
+
+ ## Available Prompts (invoke as slash commands)
+ - `session-planner` — orient and plan at session start
+ - `task-memory-executor` — execute tasks with memory and standard enforcement
+ - `senior-code-review` — full code review against stored standards
+ - `memory-guided-review` — review using project memory as context
+ - `architecture-design` — architectural planning and ADR generation
+ - `technical-planning` — feature planning with task decomposition
+ - `root-cause-analysis` — structured bug / incident investigation
+ - `fix-suggestion` — propose and validate fixes
+ - `security-triage` — security risk assessment
+ - `learning-retrospective` — capture lessons and update memory
+ - `documentation-sync` — sync docs with current codebase state
+ - `project-briefing` — generate repository briefing from memory
@@ -3,11 +3,14 @@ name: session-planner
  description: Break objective into atomic tasks.
  arguments:
  - name: objective
- description: High-level session goal.
- required: true
+ description: High-level session goal. Optional — inferred from active task, recent conversation, or pending handoff if omitted.
+ required: false
  agent: Strategy Lead
  ---
- Plan execution for: '{{objective}}'.
+ ## 0. CONTEXT RESOLUTION
+ - **objective**: If provided, use directly. If omitted — extract from the active `in_progress` task description, the most recent pending handoff, or the last user request in conversation context.
+
+ Plan execution for the resolved objective.

  Steps:
  1. **Orient**: Call `task-list` to avoid duplicate active/backlog work.
@@ -4,7 +4,6 @@ description: Sequentially execute pending tasks for current repository.
  arguments: []
  agent: Task Executor
  ---
- # Skill: task-memory-executor

  ## 1. SYNC & FILTER
  1. **Identify**: Get repo name (git/context).
@@ -3,11 +3,14 @@ name: tech-affinity-scout
  description: Scout best practices from similar tech projects.
  arguments:
  - name: tags
- description: CSV tech tags (e.g., 'react, tailwind').
- required: true
+ description: CSV tech tags (e.g., 'react, tailwind'). Optional — auto-detected from repo package files, file extensions, or active task tags if omitted.
+ required: false
  agent: Tech Scout
  ---
- Scout for relevant knowledge using tags: [{{tags}}].
+ ## 0. CONTEXT RESOLUTION
+ - **tags**: If provided, use directly. If omitted — detect from repo package files, file extensions, or active task tags.
+
+ Scout for relevant knowledge using detected/provided tags.

  Steps:
  1. **Search**: Call `memory-search` with `current_tags=[{{tags}}]`.
@@ -3,11 +3,14 @@ name: technical-planning
  description: Technical blueprint for new feature/product.
  arguments:
  - name: objective
- description: High-level goal.
- required: true
+ description: High-level goal. Optional — inferred from active task description, pending handoff, or recent conversation if omitted.
+ required: false
  agent: Technical Architect
  ---
- Create technical blueprint for: '{{objective}}'.
+ ## 0. CONTEXT RESOLUTION
+ - **objective**: If provided, use directly. If omitted — extract from the active `in_progress` task description, the most recent pending handoff summary, or the last user instruction in conversation context.
+
+ Create technical blueprint for the resolved objective.

  Cover:
  1. **Tech Stack**: Selected/confirmed technologies.
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@vheins/local-memory-mcp",
- "version": "0.10.1",
+ "version": "0.10.3",
  "description": "MCP Local Memory Service for coding copilot agents",
  "mcpName": "io.github.vheins/local-memory-mcp",
  "type": "module",
@@ -14,7 +14,8 @@
  "dist",
  "bin",
  "storage/.gitkeep",
- "dist/prompts/definitions"
+ "dist/prompts/definitions",
+ "dist/prompts/server"
  ],
  "repository": {
  "type": "git",
@@ -27,7 +28,7 @@
  "author": "Muhammad Rheza Alfin <m.rheza.alfin@gmail.com>",
  "license": "MIT",
  "scripts": {
- "build": "rm -rf dist && npm run dashboard:build && tsup --config tsup.config.ts && mkdir -p bin && cp -r src/mcp/prompts/definitions dist/prompts/ && printf \"#!/usr/bin/env node\\nimport '../dist/dashboard/server.js';\\n\" > bin/mcp-memory-dashboard.js && shx chmod +x dist/mcp/server.js dist/dashboard/server.js bin/mcp-memory-server.js bin/mcp-memory-dashboard.js",
+ "build": "rm -rf dist && npm run dashboard:build && tsup --config tsup.config.ts && mkdir -p bin && cp -r src/mcp/prompts/definitions dist/prompts/ && cp -r src/mcp/prompts/server dist/prompts/ && printf \"#!/usr/bin/env node\\nimport '../dist/dashboard/server.js';\\n\" > bin/mcp-memory-dashboard.js && shx chmod +x dist/mcp/server.js dist/dashboard/server.js bin/mcp-memory-server.js bin/mcp-memory-dashboard.js",
  "dashboard:build": "vite build --config src/dashboard/ui/vite.config.ts",
  "dashboard:dev": "vite dev --config src/dashboard/ui/vite.config.ts",
  "prepare": "npm run build",