@vheins/local-memory-mcp 0.10.0 → 0.10.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,14 +1,92 @@
1
1
  // src/mcp/capabilities.ts
2
- import { fileURLToPath } from "url";
2
+ import { fileURLToPath as fileURLToPath2 } from "url";
3
+ import path2 from "path";
4
+
5
+ // src/mcp/prompts/loader.ts
6
+ import fs from "fs";
3
7
  import path from "path";
4
- var __dirname = path.dirname(fileURLToPath(import.meta.url));
8
+ import { fileURLToPath } from "url";
9
+ import matter from "gray-matter";
10
+ var __filename = fileURLToPath(import.meta.url);
11
+ var __dirname = path.dirname(__filename);
12
+ function findPromptDir() {
13
+ const candidates = [
14
+ // Production if chunked into dist/
15
+ "./prompts",
16
+ // Production if inlined into dist/mcp/
17
+ "../prompts",
18
+ // Dev: /src/mcp/prompts/definitions (next to loader.ts)
19
+ "./definitions"
20
+ ].map((relPath) => path.resolve(__dirname, relPath));
21
+ for (const dir of candidates) {
22
+ if (fs.existsSync(dir)) {
23
+ const files = fs.readdirSync(dir);
24
+ if (files.some((f) => f.endsWith(".md"))) {
25
+ return dir;
26
+ }
27
+ }
28
+ }
29
+ return path.resolve(__dirname, "./definitions");
30
+ }
31
+ var PROMPT_DIR = findPromptDir();
32
+ function listPromptFiles() {
33
+ if (!fs.existsSync(PROMPT_DIR)) return [];
34
+ return fs.readdirSync(PROMPT_DIR).filter((file) => file.endsWith(".md")).map((file) => file.replace(/\.md$/, "")).sort();
35
+ }
36
+ function loadPromptFromMarkdown(name) {
37
+ const filePath = path.join(PROMPT_DIR, `${name}.md`);
38
+ if (!fs.existsSync(filePath)) {
39
+ throw new Error(`Prompt file not found: ${filePath}`);
40
+ }
41
+ const fileContent = fs.readFileSync(filePath, "utf-8");
42
+ const { data, content } = matter(fileContent);
43
+ return {
44
+ name: data.name || name,
45
+ description: data.description || "",
46
+ arguments: data.arguments || [],
47
+ agent: data.agent,
48
+ content: content.trim()
49
+ };
50
+ }
51
+ function findServerInstructionsDir() {
52
+ const candidates = [
53
+ // Production if chunked into dist/
54
+ "./prompts/server",
55
+ // Production if inlined into dist/mcp/
56
+ "../prompts/server",
57
+ // Dev: /src/mcp/prompts/server (next to loader.ts)
58
+ "./server"
59
+ ].map((relPath) => path.resolve(__dirname, relPath));
60
+ for (const dir of candidates) {
61
+ if (fs.existsSync(dir)) {
62
+ const filePath = path.join(dir, "instructions.md");
63
+ if (fs.existsSync(filePath)) {
64
+ return dir;
65
+ }
66
+ }
67
+ }
68
+ return path.resolve(__dirname, "./server");
69
+ }
70
+ var SERVER_DIR = findServerInstructionsDir();
71
+ function loadServerInstructions() {
72
+ const filePath = path.join(SERVER_DIR, "instructions.md");
73
+ if (!fs.existsSync(filePath)) {
74
+ throw new Error(`Server instructions file not found: ${filePath}`);
75
+ }
76
+ const fileContent = fs.readFileSync(filePath, "utf-8");
77
+ const { content } = matter(fileContent);
78
+ return content.trim();
79
+ }
80
+
81
+ // src/mcp/capabilities.ts
82
+ var __dirname2 = path2.dirname(fileURLToPath2(import.meta.url));
5
83
  var pkgVersion = "0.1.0";
6
- if ("0.10.0") {
7
- pkgVersion = "0.10.0";
84
+ if ("0.10.2") {
85
+ pkgVersion = "0.10.2";
8
86
  } else {
9
- let searchDir = __dirname;
87
+ let searchDir = __dirname2;
10
88
  for (let i = 0; i < 5; i++) {
11
- const candidate = path.join(searchDir, "package.json");
89
+ const candidate = path2.join(searchDir, "package.json");
12
90
  try {
13
91
  if (fs.existsSync(candidate)) {
14
92
  const pkg = JSON.parse(fs.readFileSync(candidate, "utf8"));
@@ -19,56 +97,14 @@ if ("0.10.0") {
19
97
  }
20
98
  } catch {
21
99
  }
22
- searchDir = path.dirname(searchDir);
100
+ searchDir = path2.dirname(searchDir);
23
101
  }
24
102
  }
25
103
  var MCP_PROTOCOL_VERSION = "2025-03-26";
26
- var SERVER_INSTRUCTIONS = `
27
- Local Memory MCP \u2014 persistent memory, task coordination, and coding standards for AI agents.
28
-
29
- ## When to use this server
30
- Use at the START of every session and before any implementation work:
31
- 1. Call \`task-list\` to sync active/pending tasks for the current repository.
32
- 2. Call \`handoff-list\` to check pending context transfers. Close stale handoffs with \`handoff-update\`.
33
- 3. Call \`memory-search\` and \`memory-synthesize\` to hydrate architectural context before coding.
34
- 4. Call \`standard-search\` when implementation may be governed by language/stack conventions.
35
-
36
- ## Core Workflows
37
-
38
- **Memory**: \`memory-search\` \u2192 \`memory-detail\` \u2192 \`memory-store\` / \`memory-update\`
39
- - Store only durable knowledge (architecture, patterns, decisions, fixes).
40
- - Use \`memory-acknowledge\` after generating code from memory results.
41
- - Global scope only for cross-repo rules; prefer repo-specific scope.
42
-
43
- **Tasks**: \`task-list\` \u2192 \`task-claim\` \u2192 \`task-update\` (in_progress \u2192 completed)
44
- - Register planned steps via \`task-create\` before execution.
45
- - Never skip intermediate \`in_progress\` state before \`completed\`.
46
- - Completing a task auto-releases claims and expires linked handoffs.
47
-
48
- **Standards**: \`standard-search\` \u2192 \`standard-store\`
49
- - One rule per entry. Treat as normative implementation contracts, not docs summaries.
50
-
51
- **Handoffs/Claims**: \`handoff-list\` \u2192 \`handoff-create\` / \`handoff-update\` | \`task-claim\` / \`claim-release\`
52
- - Create handoffs only for unfinished work with concrete next owner or next steps.
53
- - Do NOT create handoffs as completion summaries \u2014 put those on \`task-update\` comments.
54
-
55
- ## Available Prompts (invoke as slash commands)
56
- - \`session-planner\` \u2014 orient and plan at session start
57
- - \`task-memory-executor\` \u2014 execute tasks with memory and standard enforcement
58
- - \`senior-code-review\` \u2014 full code review against stored standards
59
- - \`memory-guided-review\` \u2014 review using project memory as context
60
- - \`architecture-design\` \u2014 architectural planning and ADR generation
61
- - \`technical-planning\` \u2014 feature planning with task decomposition
62
- - \`root-cause-analysis\` \u2014 structured bug / incident investigation
63
- - \`fix-suggestion\` \u2014 propose and validate fixes
64
- - \`security-triage\` \u2014 security risk assessment
65
- - \`learning-retrospective\` \u2014 capture lessons and update memory
66
- - \`documentation-sync\` \u2014 sync docs with current codebase state
67
- - \`project-briefing\` \u2014 generate repository briefing from memory
68
- `.trim();
104
+ var SERVER_INSTRUCTIONS = loadServerInstructions();
69
105
  var CAPABILITIES = {
70
106
  serverInfo: {
71
- name: "mcp-memory-local",
107
+ name: "local-memory-mcp",
72
108
  version: pkgVersion,
73
109
  instructions: SERVER_INSTRUCTIONS
74
110
  },
@@ -89,7 +125,7 @@ var CAPABILITIES = {
89
125
  };
90
126
 
91
127
  // src/mcp/utils/logger.ts
92
- import fs from "fs";
128
+ import fs2 from "fs";
93
129
  var LEVELS = {
94
130
  debug: 0,
95
131
  info: 1,
@@ -209,11 +245,11 @@ function addLogSink(sink) {
209
245
  }
210
246
  var LOG_LEVEL_VALUES = Object.keys(LEVELS);
211
247
  function createFileSink(logDir, maxFiles = 5) {
212
- fs.mkdirSync(logDir, { recursive: true });
213
- const existing = fs.readdirSync(logDir).filter((f) => f.startsWith("mcp-") && f.endsWith(".log")).sort();
248
+ fs2.mkdirSync(logDir, { recursive: true });
249
+ const existing = fs2.readdirSync(logDir).filter((f) => f.startsWith("mcp-") && f.endsWith(".log")).sort();
214
250
  while (existing.length >= maxFiles) {
215
251
  try {
216
- fs.unlinkSync(`${logDir}/${existing.shift()}`);
252
+ fs2.unlinkSync(`${logDir}/${existing.shift()}`);
217
253
  } catch {
218
254
  }
219
255
  }
@@ -223,7 +259,7 @@ function createFileSink(logDir, maxFiles = 5) {
223
259
  const line = `${(/* @__PURE__ */ new Date()).toISOString()} [${payload.level.toUpperCase()}] [pid:${process.pid}] ${JSON.stringify(payload.data)}
224
260
  `;
225
261
  try {
226
- fs.appendFileSync(logFile, line);
262
+ fs2.appendFileSync(logFile, line);
227
263
  } catch {
228
264
  }
229
265
  };
@@ -231,8 +267,8 @@ function createFileSink(logDir, maxFiles = 5) {
231
267
 
232
268
  // src/mcp/storage/sqlite.ts
233
269
  import Database from "better-sqlite3";
234
- import path3 from "path";
235
- import fs3 from "fs";
270
+ import path4 from "path";
271
+ import fs4 from "fs";
236
272
  import os from "os";
237
273
 
238
274
  // src/mcp/storage/migrations.ts
@@ -2724,8 +2760,8 @@ var HandoffEntity = class extends BaseEntity {
2724
2760
 
2725
2761
  // src/mcp/storage/write-lock.ts
2726
2762
  import lockfile from "proper-lockfile";
2727
- import path2 from "path";
2728
- import fs2 from "fs";
2763
+ import path3 from "path";
2764
+ import fs3 from "fs";
2729
2765
  var LOCK_STALE_MS = 3e4;
2730
2766
  var LOCK_RETRY_DELAY_MS = 200;
2731
2767
  var LOCK_RETRY_COUNT = 250;
@@ -2734,9 +2770,9 @@ var WriteLock = class {
2734
2770
  locked = false;
2735
2771
  constructor(dbPath) {
2736
2772
  this.lockTarget = dbPath;
2737
- if (!fs2.existsSync(dbPath)) {
2738
- fs2.mkdirSync(path2.dirname(dbPath), { recursive: true });
2739
- fs2.writeFileSync(dbPath, "");
2773
+ if (!fs3.existsSync(dbPath)) {
2774
+ fs3.mkdirSync(path3.dirname(dbPath), { recursive: true });
2775
+ fs3.writeFileSync(dbPath, "");
2740
2776
  }
2741
2777
  }
2742
2778
  /**
@@ -2788,13 +2824,13 @@ var WriteLock = class {
2788
2824
  // src/mcp/storage/sqlite.ts
2789
2825
  function resolveDbPath() {
2790
2826
  if (process.env.MEMORY_DB_PATH) return process.env.MEMORY_DB_PATH;
2791
- const standardConfigDir = process.platform === "win32" ? path3.join(os.homedir(), ".local-memory-mcp") : process.platform === "darwin" ? path3.join(os.homedir(), "Library", "Application Support", "local-memory-mcp") : path3.join(os.homedir(), ".config", "local-memory-mcp");
2792
- const standardPath = path3.join(standardConfigDir, "memory.db");
2793
- if (fs3.existsSync(standardPath)) return standardPath;
2794
- const legacyPath = path3.join(os.homedir(), ".config", "local-memory-mcp", "memory.db");
2795
- if (fs3.existsSync(legacyPath)) return legacyPath;
2796
- const localCwdFile = path3.join(process.cwd(), "storage", "memory.db");
2797
- if (fs3.existsSync(localCwdFile)) return localCwdFile;
2827
+ const standardConfigDir = process.platform === "win32" ? path4.join(os.homedir(), ".local-memory-mcp") : process.platform === "darwin" ? path4.join(os.homedir(), "Library", "Application Support", "local-memory-mcp") : path4.join(os.homedir(), ".config", "local-memory-mcp");
2828
+ const standardPath = path4.join(standardConfigDir, "memory.db");
2829
+ if (fs4.existsSync(standardPath)) return standardPath;
2830
+ const legacyPath = path4.join(os.homedir(), ".config", "local-memory-mcp", "memory.db");
2831
+ if (fs4.existsSync(legacyPath)) return legacyPath;
2832
+ const localCwdFile = path4.join(process.cwd(), "storage", "memory.db");
2833
+ if (fs4.existsSync(localCwdFile)) return localCwdFile;
2798
2834
  return standardPath;
2799
2835
  }
2800
2836
  var DB_PATH = resolveDbPath();
@@ -2813,9 +2849,9 @@ var SQLiteStore = class _SQLiteStore {
2813
2849
  const finalPath = dbPath ?? DB_PATH;
2814
2850
  this.dbPathInstance = finalPath;
2815
2851
  if (finalPath !== ":memory:") {
2816
- const dbDir = path3.dirname(finalPath);
2817
- if (!fs3.existsSync(dbDir)) {
2818
- fs3.mkdirSync(dbDir, { recursive: true });
2852
+ const dbDir = path4.dirname(finalPath);
2853
+ if (!fs4.existsSync(dbDir)) {
2854
+ fs4.mkdirSync(dbDir, { recursive: true });
2819
2855
  }
2820
2856
  }
2821
2857
  this.db = new Database(finalPath);
@@ -2870,12 +2906,12 @@ var SQLiteStore = class _SQLiteStore {
2870
2906
  */
2871
2907
  _attemptRecovery(dbPath) {
2872
2908
  const backupPath = dbPath + ".backup";
2873
- if (fs3.existsSync(backupPath)) {
2909
+ if (fs4.existsSync(backupPath)) {
2874
2910
  logger.warn("[SQLiteStore] Attempting recovery from backup", { backupPath });
2875
2911
  try {
2876
2912
  const corruptPath = `${dbPath}.corrupt_${(/* @__PURE__ */ new Date()).toISOString().replace(/[:.]/g, "").slice(0, 15)}`;
2877
- fs3.copyFileSync(dbPath, corruptPath);
2878
- fs3.copyFileSync(backupPath, dbPath);
2913
+ fs4.copyFileSync(dbPath, corruptPath);
2914
+ fs4.copyFileSync(backupPath, dbPath);
2879
2915
  logger.warn("[SQLiteStore] Recovery successful. Corrupt file saved to", { corruptPath });
2880
2916
  } catch (err) {
2881
2917
  logger.error("[SQLiteStore] Recovery failed", { error: String(err) });
@@ -2893,7 +2929,7 @@ var SQLiteStore = class _SQLiteStore {
2893
2929
  try {
2894
2930
  this.db.pragma("wal_checkpoint(PASSIVE)");
2895
2931
  const backupPath = this.dbPathInstance + ".backup";
2896
- fs3.copyFileSync(this.dbPathInstance, backupPath);
2932
+ fs4.copyFileSync(this.dbPathInstance, backupPath);
2897
2933
  } catch (err) {
2898
2934
  logger.warn("[SQLiteStore] Backup failed", { error: String(err) });
2899
2935
  }
@@ -3016,8 +3052,8 @@ var RealVectorStore = class {
3016
3052
  };
3017
3053
 
3018
3054
  // src/mcp/session.ts
3019
- import path4 from "path";
3020
- import { fileURLToPath as fileURLToPath2 } from "url";
3055
+ import path5 from "path";
3056
+ import { fileURLToPath as fileURLToPath3 } from "url";
3021
3057
  function createSessionContext() {
3022
3058
  return {
3023
3059
  roots: [],
@@ -3082,7 +3118,7 @@ function getFilesystemRoots(session) {
3082
3118
  for (const root of session.roots) {
3083
3119
  if (!root.uri.startsWith("file://")) continue;
3084
3120
  try {
3085
- resolved.push(path4.resolve(fileURLToPath2(root.uri)));
3121
+ resolved.push(path5.resolve(fileURLToPath3(root.uri)));
3086
3122
  } catch {
3087
3123
  }
3088
3124
  }
@@ -3091,19 +3127,19 @@ function getFilesystemRoots(session) {
3091
3127
  function isPathWithinRoots(targetPath, session) {
3092
3128
  const roots = getFilesystemRoots(session);
3093
3129
  if (roots.length === 0) return true;
3094
- const normalizedTarget = path4.resolve(targetPath);
3130
+ const normalizedTarget = path5.resolve(targetPath);
3095
3131
  return roots.some((rootPath) => {
3096
- const relative = path4.relative(rootPath, normalizedTarget);
3097
- return relative === "" || !relative.startsWith("..") && !path4.isAbsolute(relative);
3132
+ const relative = path5.relative(rootPath, normalizedTarget);
3133
+ return relative === "" || !relative.startsWith("..") && !path5.isAbsolute(relative);
3098
3134
  });
3099
3135
  }
3100
3136
  function findContainingRoot(targetPath, session) {
3101
3137
  const roots = getFilesystemRoots(session);
3102
3138
  if (roots.length === 0) return null;
3103
- const normalizedTarget = path4.resolve(targetPath);
3139
+ const normalizedTarget = path5.resolve(targetPath);
3104
3140
  for (const rootPath of roots) {
3105
- const relative = path4.relative(rootPath, normalizedTarget);
3106
- if (relative === "" || !relative.startsWith("..") && !path4.isAbsolute(relative)) {
3141
+ const relative = path5.relative(rootPath, normalizedTarget);
3142
+ if (relative === "" || !relative.startsWith("..") && !path5.isAbsolute(relative)) {
3107
3143
  return rootPath;
3108
3144
  }
3109
3145
  }
@@ -3112,7 +3148,7 @@ function findContainingRoot(targetPath, session) {
3112
3148
  function inferRepoFromSession(session) {
3113
3149
  const roots = getFilesystemRoots(session);
3114
3150
  if (roots.length === 1) {
3115
- return path4.basename(roots[0]);
3151
+ return path5.basename(roots[0]);
3116
3152
  }
3117
3153
  return void 0;
3118
3154
  }
@@ -3227,7 +3263,7 @@ var SingleTaskCreateSchema = z.object({
3227
3263
  doc_path: z.string().optional(),
3228
3264
  tags: z.array(z.string()).optional(),
3229
3265
  metadata: z.record(z.string(), z.any()).optional(),
3230
- parent_id: z.string().uuid().optional(),
3266
+ parent_id: z.string().optional(),
3231
3267
  depends_on: z.string().uuid().optional(),
3232
3268
  est_tokens: z.number().int().min(0).optional()
3233
3269
  });
@@ -3245,7 +3281,7 @@ var TaskCreateSchema = z.object({
3245
3281
  doc_path: z.string().optional(),
3246
3282
  tags: z.array(z.string()).optional(),
3247
3283
  metadata: z.record(z.string(), z.any()).optional(),
3248
- parent_id: z.string().uuid().optional(),
3284
+ parent_id: z.string().optional(),
3249
3285
  depends_on: z.string().uuid().optional(),
3250
3286
  est_tokens: z.number().int().min(0).optional(),
3251
3287
  // Allow bulk tasks
@@ -3279,7 +3315,7 @@ var TaskUpdateSchema = z.object({
3279
3315
  doc_path: z.string().optional(),
3280
3316
  tags: z.array(z.string()).optional(),
3281
3317
  metadata: z.record(z.string(), z.any()).optional(),
3282
- parent_id: z.string().uuid().optional(),
3318
+ parent_id: z.string().optional(),
3283
3319
  depends_on: z.string().uuid().optional(),
3284
3320
  est_tokens: z.number().int().min(0).optional(),
3285
3321
  force: z.boolean().optional(),
@@ -4079,7 +4115,7 @@ var TOOL_DEFINITIONS = [
4079
4115
  doc_path: { type: "string" },
4080
4116
  tags: { type: "array", items: { type: "string" } },
4081
4117
  metadata: { type: "object" },
4082
- parent_id: { type: "string", format: "uuid" },
4118
+ parent_id: { type: "string", description: "Optional parent task ID (UUID) or parent task code (e.g. TASK-001). Resolved to UUID before storing." },
4083
4119
  depends_on: { type: "string", format: "uuid" },
4084
4120
  est_tokens: { type: "number", minimum: 0, description: "Estimated tokens budget for this task" },
4085
4121
  tasks: {
@@ -4104,7 +4140,7 @@ var TOOL_DEFINITIONS = [
4104
4140
  doc_path: { type: "string" },
4105
4141
  tags: { type: "array", items: { type: "string" } },
4106
4142
  metadata: { type: "object" },
4107
- parent_id: { type: "string", format: "uuid" },
4143
+ parent_id: { type: "string", description: "Optional parent task ID (UUID) or parent task code (e.g. TASK-001). Resolved to UUID before storing." },
4108
4144
  depends_on: { type: "string", format: "uuid" },
4109
4145
  est_tokens: { type: "number", minimum: 0 }
4110
4146
  },
@@ -4173,7 +4209,7 @@ var TOOL_DEFINITIONS = [
4173
4209
  doc_path: { type: "string" },
4174
4210
  tags: { type: "array", items: { type: "string" } },
4175
4211
  metadata: { type: "object" },
4176
- parent_id: { type: "string", format: "uuid" },
4212
+ parent_id: { type: "string", description: "Optional parent task ID (UUID) or parent task code (e.g. TASK-001). Resolved to UUID before storing." },
4177
4213
  depends_on: { type: "string", format: "uuid" },
4178
4214
  est_tokens: {
4179
4215
  type: "number",
@@ -4252,7 +4288,7 @@ var TOOL_DEFINITIONS = [
4252
4288
  properties: {
4253
4289
  repo: {
4254
4290
  type: "string",
4255
- description: "Repository name"
4291
+ description: "Repository name (required)"
4256
4292
  },
4257
4293
  status: {
4258
4294
  type: "string",
@@ -4707,7 +4743,7 @@ var TOOL_DEFINITIONS = [
4707
4743
  {
4708
4744
  name: "standard-search",
4709
4745
  title: "Standard Search",
4710
- description: "NAVIGATION LAYER: Returns a compact pointer table of matching coding standards. Use `standard-detail` to fetch full content for a selected result.",
4746
+ description: "MANDATORY PRE-IMPLEMENTATION CHECK: Call before any code edit, test edit, refactor, migration, or implementation decision to find applicable coding standards. Returns a compact pointer table; use `standard-detail` for relevant results. If no relevant standards are returned, continue and state that no applicable standards were found.",
4711
4747
  annotations: {
4712
4748
  readOnlyHint: true,
4713
4749
  idempotentHint: true,
@@ -5207,53 +5243,6 @@ function invalidCompletionParams(message) {
5207
5243
  return error;
5208
5244
  }
5209
5245
 
5210
- // src/mcp/prompts/loader.ts
5211
- import fs4 from "fs";
5212
- import path5 from "path";
5213
- import { fileURLToPath as fileURLToPath3 } from "url";
5214
- import matter from "gray-matter";
5215
- var __filename = fileURLToPath3(import.meta.url);
5216
- var __dirname2 = path5.dirname(__filename);
5217
- function findPromptDir() {
5218
- const candidates = [
5219
- // Production if chunked into dist/
5220
- "./prompts",
5221
- // Production if inlined into dist/mcp/
5222
- "../prompts",
5223
- // Dev: /src/mcp/prompts/definitions (next to loader.ts)
5224
- "./definitions"
5225
- ].map((relPath) => path5.resolve(__dirname2, relPath));
5226
- for (const dir of candidates) {
5227
- if (fs4.existsSync(dir)) {
5228
- const files = fs4.readdirSync(dir);
5229
- if (files.some((f) => f.endsWith(".md"))) {
5230
- return dir;
5231
- }
5232
- }
5233
- }
5234
- return path5.resolve(__dirname2, "./definitions");
5235
- }
5236
- var PROMPT_DIR = findPromptDir();
5237
- function listPromptFiles() {
5238
- if (!fs4.existsSync(PROMPT_DIR)) return [];
5239
- return fs4.readdirSync(PROMPT_DIR).filter((file) => file.endsWith(".md")).map((file) => file.replace(/\.md$/, "")).sort();
5240
- }
5241
- function loadPromptFromMarkdown(name) {
5242
- const filePath = path5.join(PROMPT_DIR, `${name}.md`);
5243
- if (!fs4.existsSync(filePath)) {
5244
- throw new Error(`Prompt file not found: ${filePath}`);
5245
- }
5246
- const fileContent = fs4.readFileSync(filePath, "utf-8");
5247
- const { data, content } = matter(fileContent);
5248
- return {
5249
- name: data.name || name,
5250
- description: data.description || "",
5251
- arguments: data.arguments || [],
5252
- agent: data.agent,
5253
- content: content.trim()
5254
- };
5255
- }
5256
-
5257
5246
  // src/mcp/prompts/registry.ts
5258
5247
  function createPromptDefinition(loaded) {
5259
5248
  return {
@@ -10,7 +10,7 @@ import {
10
10
  createFileSink,
11
11
  listResources,
12
12
  logger
13
- } from "../chunk-VWOKV6W2.js";
13
+ } from "../chunk-FFPZ7JBM.js";
14
14
 
15
15
  // src/dashboard/server.ts
16
16
  import express from "express";
@@ -57,7 +57,7 @@ import {
57
57
  toContextSlug,
58
58
  updateSessionFromInitialize,
59
59
  updateSessionRoots
60
- } from "../chunk-VWOKV6W2.js";
60
+ } from "../chunk-FFPZ7JBM.js";
61
61
 
62
62
  // src/mcp/server.ts
63
63
  import readline from "readline";
@@ -739,6 +739,14 @@ function capitalize2(str) {
739
739
 
740
740
  // src/mcp/tools/task.manage.ts
741
741
  import { randomUUID as randomUUID2 } from "crypto";
742
+ var UUID_REGEX = /^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/i;
743
+ function resolveParentId(value, repo, storage) {
744
+ if (!value) return null;
745
+ if (UUID_REGEX.test(value)) return value;
746
+ const parent = storage.tasks.getTaskByCode(repo, value);
747
+ if (!parent) throw new Error(`parent_id: task with code '${value}' not found in repo '${repo}'`);
748
+ return parent.id;
749
+ }
742
750
  function describeTaskListFilter(status) {
743
751
  if (!status) return "active";
744
752
  if (status === "all") return "all";
@@ -932,7 +940,7 @@ async function handleTaskCreate(args, storage) {
932
940
  if (normalizedStatus === "pending") {
933
941
  if (initialStats.todo + pendingInRequestCount >= 10) {
934
942
  throw new Error(
935
- `Cannot create task '${taskData.task_code}' as 'pending'. Maximum of 10 pending tasks reached.`
943
+ `Cannot create task '${taskData.task_code}' as 'pending'. Maximum of 10 pending tasks reached. Please use status 'backlog' for new tasks instead.`
936
944
  );
937
945
  }
938
946
  }
@@ -962,7 +970,7 @@ async function handleTaskCreate(args, storage) {
962
970
  est_tokens: taskData.est_tokens ?? 0,
963
971
  tags,
964
972
  metadata: taskData.metadata || {},
965
- parent_id: taskData.parent_id || null,
973
+ parent_id: resolveParentId(taskData.parent_id, repo, storage),
966
974
  depends_on: taskData.depends_on || null
967
975
  };
968
976
  storage.tasks.insertTask(task2);
@@ -1005,7 +1013,7 @@ async function handleTaskCreate(args, storage) {
1005
1013
  if (status === "pending") {
1006
1014
  const stats = storage.tasks.getTaskStats(repo);
1007
1015
  if (stats.todo >= 10) {
1008
- throw new Error(`Cannot create task as 'pending'. Maximum of 10 pending tasks reached.`);
1016
+ throw new Error(`Cannot create task as 'pending'. Maximum of 10 pending tasks reached. Please use status 'backlog' for new tasks instead.`);
1009
1017
  }
1010
1018
  }
1011
1019
  const taskId = randomUUID2();
@@ -1036,7 +1044,7 @@ async function handleTaskCreate(args, storage) {
1036
1044
  est_tokens: est_tokens ?? 0,
1037
1045
  tags: finalTags,
1038
1046
  metadata: metadata || {},
1039
- parent_id: parent_id || null,
1047
+ parent_id: resolveParentId(parent_id, repo, storage),
1040
1048
  depends_on: depends_on || null
1041
1049
  };
1042
1050
  storage.tasks.insertTask(task);
@@ -1202,6 +1210,9 @@ async function handleTaskUpdate(args, storage, vectors2) {
1202
1210
  throw new Error(`Duplicate task_code: '${updates.task_code}' already exists`);
1203
1211
  }
1204
1212
  const finalUpdates = { ...updates };
1213
+ if (updates.parent_id !== void 0) {
1214
+ finalUpdates.parent_id = resolveParentId(updates.parent_id, repo, storage);
1215
+ }
1205
1216
  if (updates.phase !== void 0 || updates.tags !== void 0) {
1206
1217
  let currentTags = updates.tags || existingTask.tags || [];
1207
1218
  currentTags = currentTags.filter((t) => !t.startsWith("phase:"));
@@ -7,7 +7,6 @@ arguments:
7
7
  required: true
8
8
  agent: Task Planner
9
9
  ---
10
- # Skill: create-task (Task Creation Orchestrator)
11
10
 
12
11
  ## 🚫 FORBIDDEN: NON-EXECUTION
13
12
  DO NOT edit/create/delete files, run commands, or implement code.
@@ -18,7 +17,7 @@ ONLY call MCP tools. No prose, no code, no plans outside MCP.
18
17
 
19
18
  ## 1. PRE-ANALYSIS
20
19
  1. **Search Memory**: Call `memory-search` (architecture/history).
21
- 2. **Search Standards**: Call `standard-search` when coding conventions may constrain the task.
20
+ 2. **Search Standards**: Call `standard-search` for tasks that may lead to code edits, test edits, refactors, migrations, or implementation decisions. If no relevant standards are returned, note that no applicable standards were found.
22
21
  3. **Check Handoffs**: Call `handoff-list` for pending context that may already describe unfinished work. Ignore or close stale handoffs that only describe completed work.
23
22
  4. **Research Codebase**: Read relevant source files to verify current implementation and paths.
24
23
  5. **De-duplicate**: Call `task-list`. DO NOT duplicate existing tasks. Link related tasks via `parent_id`/`depends_on`.
@@ -13,7 +13,6 @@ arguments:
13
13
  required: true
14
14
  agent: Integration Architect
15
15
  ---
16
- # Skill: export-task-to-github
17
16
 
18
17
  ## 1. RETRIEVE
19
18
  1. **Fetch**: Call `task-detail` for `task_id`.
@@ -4,7 +4,6 @@ description: Import GitHub Issues as local tasks.
4
4
  arguments: []
5
5
  agent: Integration Scout
6
6
  ---
7
- # Skill: import-github-issues
8
7
 
9
8
  ## 1. FETCH
10
9
  - **Primary**: Use `github-mcp-server` to list open issues.
@@ -20,7 +20,7 @@ You are a memory-aware agent. Memory is project truth, not a suggestion.
20
20
  1. **Orient**: Call `task-list` for active work and `handoff-list` for pending transfers when starting a repository session. Close stale pending handoffs with `handoff-update` when they no longer describe unfinished work.
21
21
  2. **Claim**: Use `task-claim` before taking ownership of a concrete task. Use `claim-list` when ownership is unclear and `claim-release` to clear stale claims during reassignment.
22
22
  3. **Search**: Call `memory-search` with `current_file_path` and `current_tags` before coding.
23
- 4. **Standards**: Call `standard-search` when implementation may be governed by coding standards.
23
+ 4. **Standards**: Call `standard-search` before any code edit, test edit, refactor, migration, or implementation decision. Use the task intent, affected files, inferred language, stack, and repo as filters. If no relevant standards are returned, continue and state that no applicable standards were found.
24
24
  5. **Retrieve**: Use `memory-detail` for full content if search pointer rows are insufficient.
25
25
  6. **Select**: Use ONLY highly relevant memories and standards.
26
26
 
@@ -11,7 +11,7 @@ Briefing Steps:
11
11
  2. **Tasks**: Call `task-list` for `in_progress,pending` tasks.
12
12
  3. **Handoffs**: Call `handoff-list` with `status=pending` to surface transfer context. Treat only handoffs with unfinished work, blockers, next owner, or linked task as active.
13
13
  4. **Memory**: Call `memory-search` or `memory-recap` for recent decisions, patterns, and mistakes; hydrate important entries with `memory-detail`.
14
- 5. **Standards**: Call `standard-search` with current repo/stack when implementation guidance is needed.
14
+ 5. **Standards**: Call `standard-search` with current repo/stack as the mandatory pre-implementation standards check. If no relevant standards are returned, say no applicable standards were found.
15
15
  6. **Core Context**: Summarize active task, pending handoffs, applicable standards, and top architectural decisions.
16
16
  7. **Priority Reminder**: Treat task priority with MCP semantics: `1=Low`, `2=Normal`, `3=Medium`, `4=High`, `5=Critical`.
17
17
  8. **Action**: Propose next steps based on the active queue.
@@ -7,7 +7,6 @@ arguments:
7
7
  required: false
8
8
  agent: Quality Auditor
9
9
  ---
10
- # Skill: review-and-audit (Audit Agent)
11
10
 
12
11
  ## 1. ANALYSIS
13
12
  1. **Sequential Discovery**: Explore docs and code sequentially. NO parallel sub-agents.
@@ -24,7 +23,7 @@ ONLY call MCP tools. No prose, code, or external plans.
24
23
 
25
24
  ## 2. PRE-TASK ANALYSIS
26
25
  1. **Search**: Call `memory-search` (Hybrid Search). 0.55 similarity threshold.
27
- 2. **Standards**: Call `standard-search` when implementation conventions are relevant.
26
+ 2. **Standards**: Call `standard-search` before creating implementation tasks so task scope reflects applicable coding standards. If no relevant standards are returned, note that no applicable standards were found.
28
27
  3. **Handoffs**: Call `handoff-list` for pending transfer context related to the target. Treat handoffs as active only when they contain unfinished work, blockers, a next owner, or a linked task.
29
28
  4. **De-duplicate**: Call `task-list`. Skip existing/redundant tasks. Link via `parent_id`/`depends_on`.
30
29
 
@@ -13,7 +13,6 @@ arguments:
13
13
  required: false
14
14
  agent: Quality Auditor
15
15
  ---
16
- # Skill: review-and-post-issue (Audit Agent)
17
16
 
18
17
  ## 1. ANALYSIS
19
18
  1. **Sequential Discovery**: Explore docs and code sequentially. NO parallel sub-agents.
@@ -0,0 +1,46 @@
1
+ ---
2
+ name: server-instructions
3
+ description: Main instructions for the MCP server
4
+ ---
5
+ Local Memory MCP — persistent memory, task coordination, and coding standards for AI agents.
6
+
7
+ ## When to use this server
8
+ Use at the START of every session and before any implementation work:
9
+ 1. Call `task-list` to sync active/pending tasks for the current repository.
10
+ 2. Call `handoff-list` to check pending context transfers. Close stale handoffs with `handoff-update`.
11
+ 3. Call `memory-search` and `memory-synthesize` to hydrate architectural context before coding.
12
+ 4. Call `standard-search` before any code edit, test edit, refactor, migration, or implementation decision. This is mandatory even for small tasks; use the task intent, affected files, inferred language, stack, and repo as filters. If no relevant standards are returned, continue and state that no applicable standards were found.
13
+
14
+ ## Core Workflows
15
+
16
+ **Memory**: `memory-search` → `memory-detail` → `memory-store` / `memory-update`
17
+ - Store only durable knowledge (architecture, patterns, decisions, fixes).
18
+ - Use `memory-acknowledge` after generating code from memory results.
19
+ - Global scope only for cross-repo rules; prefer repo-specific scope.
20
+
21
+ **Tasks**: `task-list` → `task-claim` → `task-update` (in_progress → completed)
22
+ - Register planned steps via `task-create` before execution.
23
+ - Never skip intermediate `in_progress` state before `completed`.
24
+ - Completing a task auto-releases claims and expires linked handoffs.
25
+
26
+ **Standards**: `standard-search` → `standard-store`
27
+ - `standard-search` is the pre-implementation gate for code, tests, refactors, migrations, and implementation decisions.
28
+ - One rule per entry. Treat each entry as a normative implementation contract, not a documentation summary.
29
+
30
+ **Handoffs/Claims**: `handoff-list` → `handoff-create` / `handoff-update` | `task-claim` / `claim-release`
31
+ - Create handoffs only for unfinished work with a concrete next owner or next steps.
32
+ - Do NOT create handoffs as completion summaries — put those on `task-update` comments.
33
+
34
+ ## Available Prompts (invoke as slash commands)
35
+ - `session-planner` — orient and plan at session start
36
+ - `task-memory-executor` — execute tasks with memory and standard enforcement
37
+ - `senior-code-review` — full code review against stored standards
38
+ - `memory-guided-review` — review using project memory as context
39
+ - `architecture-design` — architectural planning and ADR generation
40
+ - `technical-planning` — feature planning with task decomposition
41
+ - `root-cause-analysis` — structured bug / incident investigation
42
+ - `fix-suggestion` — propose and validate fixes
43
+ - `security-triage` — security risk assessment
44
+ - `learning-retrospective` — capture lessons and update memory
45
+ - `documentation-sync` — sync docs with current codebase state
46
+ - `project-briefing` — generate repository briefing from memory
@@ -11,7 +11,7 @@ Plan execution for: '{{objective}}'.
11
11
 
12
12
  Steps:
13
13
  1. **Orient**: Call `task-list` to avoid duplicate active/backlog work.
14
- 2. **Standards**: Call `standard-search` if the objective touches implementation conventions.
14
+ 2. **Standards**: Call `standard-search` for objectives that may lead to code edits, test edits, refactors, migrations, or implementation decisions. If no relevant standards are returned, state that no applicable standards were found.
15
15
  3. **Handoffs**: Call `handoff-list` for pending context that may affect sequencing. Stale pending handoffs that only summarize completed work should be closed with `handoff-update`, not planned as queue work.
16
16
  4. **Analyze**: Break into 3-7 atomic, verifiable tasks.
17
17
  5. **Phase**: Group into `research`, `implementation`, and `validation`.
@@ -16,7 +16,7 @@ agent: Project Manager
16
16
  ## 2. DETAIL TOOLS
17
17
  - **Tasks**: Call `task-detail` for history/comments (ID or `task_code`).
18
18
  - **Memory**: Call `memory-detail` for full entry content.
19
- - **Standards**: Call `standard-search` before implementation when coding standards may apply.
19
+ - **Standards**: Call `standard-search` before any code edit, test edit, refactor, migration, or implementation decision. If no relevant standards are returned, continue and state that no applicable standards were found.
20
20
  - **Handoffs**: Call `handoff-list` to discover pending context transfers before starting a task. Close stale handoffs with `handoff-update` when no concrete next owner, unfinished task, or blocker remains.
21
21
 
22
22
  ## 3. WORKFLOW
@@ -4,7 +4,6 @@ description: Sequentially execute pending tasks for current repository.
4
4
  arguments: []
5
5
  agent: Task Executor
6
6
  ---
7
- # Skill: task-memory-executor
8
7
 
9
8
  ## 1. SYNC & FILTER
10
9
  1. **Identify**: Get repo name (git/context).
@@ -27,7 +26,7 @@ agent: Task Executor
27
26
  3. **Claim**: Use `task-claim` with `task_code` or `task_id` before implementation.
28
27
  4. **Start**: `task-update` status to `in_progress` (MUST transition: `pending` → `in_progress`). Add agent/role metadata.
29
28
  5. **Research**: Call `memory-search` (Hybrid Search) and hydrate relevant results with `memory-detail`.
30
- 6. **Standards (MANDATORY PER TASK)**: Call `standard-search` for every task inside the execution loop before implementation, using the task intent plus inferred language/stack/repo as filters. This is required even for decomposed tasks and sub-agent assignments, so each task execution remains aligned with current standards. Apply only relevant standards and hydrate details when needed.
29
+ 6. **Standards (MANDATORY PER TASK)**: Call `standard-search` for every task inside the execution loop before any code edit, test edit, refactor, migration, or implementation decision, using the task intent, affected files, inferred language, stack, and repo as filters. This is required even for small tasks, decomposed tasks, and sub-agent assignments. Apply only relevant standards, hydrate details when needed, and if no relevant standards are returned, continue and state that no applicable standards were found.
31
30
  7. **Execute**:
32
31
  - **Trace**: Inspect logic, call sites, and docs. DO NOT infer from file presence.
33
32
  - **Logic**: Implement per description/intent.
@@ -24,7 +24,7 @@ Use the tools in the same flow exposed by the dashboard: navigate with compact l
24
24
  - **Automatic cleanup**: Moving a task to `completed` or `canceled` automatically releases active claims and expires pending handoffs linked to that task.
25
25
 
26
26
  ## 3. Standards Flow
27
- - **Search first**: Use `standard-search` to find coding standards by `query`, `language`, `stack`, `repo`, and `is_global`.
27
+ - **Search first**: `standard-search` is mandatory before any code edit, test edit, refactor, migration, or implementation decision. Search by `query`, `language`, `stack`, `repo`, and `is_global`; if no relevant standards are returned, continue and state that no applicable standards were found.
28
28
  - **Apply precisely**: Treat standards as implementation rules, not generic documentation summaries.
29
29
  - **Store atomically**: Use `standard-store` for one rule per entry with `name`, `content`, `context`, `version`, `language`, `stack`, `tags`, and correct repo/global scope.
30
30
  - **Scope**: Prefer repo-specific standards for local conventions; use global standards only for cross-repo rules.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@vheins/local-memory-mcp",
3
- "version": "0.10.0",
3
+ "version": "0.10.2",
4
4
  "description": "MCP Local Memory Service for coding copilot agents",
5
5
  "mcpName": "io.github.vheins/local-memory-mcp",
6
6
  "type": "module",
@@ -14,7 +14,8 @@
14
14
  "dist",
15
15
  "bin",
16
16
  "storage/.gitkeep",
17
- "dist/prompts/definitions"
17
+ "dist/prompts/definitions",
18
+ "dist/prompts/server"
18
19
  ],
19
20
  "repository": {
20
21
  "type": "git",
@@ -27,7 +28,7 @@
27
28
  "author": "Muhammad Rheza Alfin <m.rheza.alfin@gmail.com>",
28
29
  "license": "MIT",
29
30
  "scripts": {
30
- "build": "rm -rf dist && npm run dashboard:build && tsup --config tsup.config.ts && mkdir -p bin && cp -r src/mcp/prompts/definitions dist/prompts/ && printf \"#!/usr/bin/env node\\nimport '../dist/dashboard/server.js';\\n\" > bin/mcp-memory-dashboard.js && shx chmod +x dist/mcp/server.js dist/dashboard/server.js bin/mcp-memory-server.js bin/mcp-memory-dashboard.js",
31
+ "build": "rm -rf dist && npm run dashboard:build && tsup --config tsup.config.ts && mkdir -p bin && cp -r src/mcp/prompts/definitions dist/prompts/ && cp -r src/mcp/prompts/server dist/prompts/ && printf \"#!/usr/bin/env node\\nimport '../dist/dashboard/server.js';\\n\" > bin/mcp-memory-dashboard.js && shx chmod +x dist/mcp/server.js dist/dashboard/server.js bin/mcp-memory-server.js bin/mcp-memory-dashboard.js",
31
32
  "dashboard:build": "vite build --config src/dashboard/ui/vite.config.ts",
32
33
  "dashboard:dev": "vite dev --config src/dashboard/ui/vite.config.ts",
33
34
  "prepare": "npm run build",