gitmem-mcp 1.0.14 → 1.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,14 +1,16 @@
1
1
  /**
2
2
  * Thread Deduplication Service (Phase 3)
3
3
  *
4
- * Pure functions for detecting duplicate threads by embedding similarity
5
- * or normalized text equality. Zero I/O — all Supabase and embedding
6
- * calls live in the caller (create-thread.ts).
4
+ * Pure functions for detecting duplicate threads by embedding similarity,
5
+ * token overlap, or normalized text equality. Zero I/O — all Supabase
6
+ * and embedding calls live in the caller (create-thread.ts).
7
7
  *
8
8
  * Strategy:
9
9
  * 1. If embedding available: cosine similarity > 0.85 → duplicate
10
- * 2. If embedding unavailable: normalized text equality → duplicate
11
- * 3. If no existing threads: skip check
10
+ * 2. Token overlap coefficient > 0.6 → duplicate (no API key needed)
11
+ * - Lowered to 0.4 when both threads share an issue prefix (e.g., OD-692:)
12
+ * 3. Normalized text equality → duplicate
13
+ * 4. If no existing threads: skip check
12
14
  */
13
15
  import type { ThreadObject } from "../types/index.js";
14
16
  export interface ThreadWithEmbedding {
@@ -21,9 +23,11 @@ export interface DedupResult {
21
23
  matched_thread_id: string | null;
22
24
  matched_text: string | null;
23
25
  similarity: number | null;
24
- method: "embedding" | "text_normalization" | "skipped";
26
+ method: "embedding" | "token_overlap" | "text_normalization" | "skipped";
25
27
  }
26
28
  export declare const DEDUP_SIMILARITY_THRESHOLD = 0.85;
29
+ export declare const TOKEN_OVERLAP_THRESHOLD = 0.6;
30
+ export declare const TOKEN_OVERLAP_ISSUE_PREFIX_THRESHOLD = 0.4;
27
31
  /**
28
32
  * Check if new thread text is a semantic duplicate of any existing open thread.
29
33
  *
@@ -43,7 +47,23 @@ export declare function cosineSimilarity(a: number[], b: number[]): number;
43
47
  */
44
48
  export declare function normalizeText(text: string): string;
45
49
  /**
46
- * Deduplicate a thread list by both ID and normalized text.
50
+ * Tokenize text into content words for overlap comparison.
51
+ * Lowercase, split on non-alphanumeric boundaries, remove stop words.
52
+ */
53
+ export declare function tokenize(text: string): Set<string>;
54
+ /**
55
+ * Overlap coefficient: |intersection| / min(|A|, |B|).
56
+ * Handles the common case where one thread is a shorter variant of another.
57
+ * Returns 0 if either set is empty.
58
+ */
59
+ export declare function tokenOverlap(a: Set<string>, b: Set<string>): number;
60
+ /**
61
+ * Extract issue prefix like "OD-692" or "PROJ-123" from thread text.
62
+ * Returns null if no prefix found.
63
+ */
64
+ export declare function extractIssuePrefix(text: string): string | null;
65
+ /**
66
+ * Deduplicate a thread list by ID, normalized text, and token overlap.
47
67
  * First-seen wins. Skips empty-text threads. Does not mutate input.
48
68
  *
49
69
  * Applied at every thread loading/merging exit point to guarantee
@@ -1,17 +1,28 @@
1
1
  /**
2
2
  * Thread Deduplication Service (Phase 3)
3
3
  *
4
- * Pure functions for detecting duplicate threads by embedding similarity
5
- * or normalized text equality. Zero I/O — all Supabase and embedding
6
- * calls live in the caller (create-thread.ts).
4
+ * Pure functions for detecting duplicate threads by embedding similarity,
5
+ * token overlap, or normalized text equality. Zero I/O — all Supabase
6
+ * and embedding calls live in the caller (create-thread.ts).
7
7
  *
8
8
  * Strategy:
9
9
  * 1. If embedding available: cosine similarity > 0.85 → duplicate
10
- * 2. If embedding unavailable: normalized text equality → duplicate
11
- * 3. If no existing threads: skip check
10
+ * 2. Token overlap coefficient > 0.6 → duplicate (no API key needed)
11
+ * - Lowered to 0.4 when both threads share an issue prefix (e.g., OD-692:)
12
+ * 3. Normalized text equality → duplicate
13
+ * 4. If no existing threads: skip check
12
14
  */
13
15
  // ---------- Constants ----------
14
16
  export const DEDUP_SIMILARITY_THRESHOLD = 0.85;
17
+ export const TOKEN_OVERLAP_THRESHOLD = 0.6;
18
+ export const TOKEN_OVERLAP_ISSUE_PREFIX_THRESHOLD = 0.4;
19
+ const STOP_WORDS = new Set([
20
+ "a", "an", "the", "and", "or", "but", "in", "on", "at", "to", "for",
21
+ "of", "with", "by", "from", "is", "it", "be", "as", "was", "are",
22
+ "been", "being", "have", "has", "had", "do", "does", "did", "will",
23
+ "that", "this", "not", "no", "so", "if", "its", "also", "into",
24
+ "than", "then", "can", "just", "about", "up", "out", "still",
25
+ ]);
15
26
  // ---------- Core ----------
16
27
  /**
17
28
  * Check if new thread text is a semantic duplicate of any existing open thread.
@@ -60,6 +71,36 @@ export function checkDuplicate(newText, newEmbedding, existingThreads) {
60
71
  method: "embedding",
61
72
  };
62
73
  }
74
+ // Token overlap check (works without any API key)
75
+ const newTokens = tokenize(newText);
76
+ const newPrefix = extractIssuePrefix(newText);
77
+ if (newTokens.size > 0) {
78
+ let bestOverlap = -1;
79
+ let bestThread = null;
80
+ for (const thread of existingThreads) {
81
+ const existingTokens = tokenize(thread.text);
82
+ if (existingTokens.size === 0)
83
+ continue;
84
+ const overlap = tokenOverlap(newTokens, existingTokens);
85
+ const existingPrefix = extractIssuePrefix(thread.text);
86
+ const threshold = newPrefix && existingPrefix && newPrefix === existingPrefix
87
+ ? TOKEN_OVERLAP_ISSUE_PREFIX_THRESHOLD
88
+ : TOKEN_OVERLAP_THRESHOLD;
89
+ if (overlap > threshold && overlap > bestOverlap) {
90
+ bestOverlap = overlap;
91
+ bestThread = thread;
92
+ }
93
+ }
94
+ if (bestThread && bestOverlap > 0) {
95
+ return {
96
+ is_duplicate: true,
97
+ matched_thread_id: bestThread.thread_id,
98
+ matched_text: bestThread.text,
99
+ similarity: round(bestOverlap, 4),
100
+ method: "token_overlap",
101
+ };
102
+ }
103
+ }
63
104
  // Text normalization fallback (conservative: exact match only)
64
105
  const normalizedNew = normalizeText(newText);
65
106
  for (const thread of existingThreads) {
@@ -106,13 +147,49 @@ export function normalizeText(text) {
106
147
  .trim()
107
148
  .replace(/[.!?;:]+$/, "");
108
149
  }
150
+ /**
151
+ * Tokenize text into content words for overlap comparison.
152
+ * Lowercase, split on non-alphanumeric boundaries, remove stop words.
153
+ */
154
+ export function tokenize(text) {
155
+ const words = text
156
+ .toLowerCase()
157
+ .split(/[^a-z0-9-]+/)
158
+ .filter((w) => w.length > 1 && !STOP_WORDS.has(w));
159
+ return new Set(words);
160
+ }
161
+ /**
162
+ * Overlap coefficient: |intersection| / min(|A|, |B|).
163
+ * Handles the common case where one thread is a shorter variant of another.
164
+ * Returns 0 if either set is empty.
165
+ */
166
+ export function tokenOverlap(a, b) {
167
+ if (a.size === 0 || b.size === 0)
168
+ return 0;
169
+ let intersection = 0;
170
+ const smaller = a.size <= b.size ? a : b;
171
+ const larger = a.size <= b.size ? b : a;
172
+ for (const word of smaller) {
173
+ if (larger.has(word))
174
+ intersection++;
175
+ }
176
+ return intersection / Math.min(a.size, b.size);
177
+ }
178
+ /**
179
+ * Extract issue prefix like "OD-692" or "PROJ-123" from thread text.
180
+ * Returns null if no prefix found.
181
+ */
182
+ export function extractIssuePrefix(text) {
183
+ const match = text.match(/^([A-Z]+-\d+)/i);
184
+ return match ? match[1].toUpperCase() : null;
185
+ }
109
186
  function round(value, decimals) {
110
187
  const factor = 10 ** decimals;
111
188
  return Math.round(value * factor) / factor;
112
189
  }
113
190
  // ---------- List Deduplication ----------
114
191
  /**
115
- * Deduplicate a thread list by both ID and normalized text.
192
+ * Deduplicate a thread list by ID, normalized text, and token overlap.
116
193
  * First-seen wins. Skips empty-text threads. Does not mutate input.
117
194
  *
118
195
  * Applied at every thread loading/merging exit point to guarantee
@@ -120,19 +197,44 @@ function round(value, decimals) {
120
197
  */
121
198
  export function deduplicateThreadList(threads) {
122
199
  const seenIds = new Set();
123
- const seenText = new Set();
124
200
  const result = [];
201
+ // Track accepted threads with their tokens for overlap comparison
202
+ const accepted = [];
125
203
  for (const thread of threads) {
126
204
  const text = thread.text || "";
127
205
  const key = normalizeText(text);
128
206
  // Skip empty-text threads
129
207
  if (!key)
130
208
  continue;
131
- // Skip if we've seen this ID or this normalized text
132
- if (seenIds.has(thread.id) || seenText.has(key))
209
+ // Skip if we've seen this ID
210
+ if (seenIds.has(thread.id))
211
+ continue;
212
+ // Check exact text match against accepted threads
213
+ const tokens = tokenize(text);
214
+ const prefix = extractIssuePrefix(text);
215
+ let isDuplicate = false;
216
+ for (const prev of accepted) {
217
+ // Exact normalized text match
218
+ if (normalizeText(prev.text) === key) {
219
+ isDuplicate = true;
220
+ break;
221
+ }
222
+ // Token overlap match
223
+ if (tokens.size > 0 && prev.tokens.size > 0) {
224
+ const overlap = tokenOverlap(tokens, prev.tokens);
225
+ const threshold = prefix && prev.prefix && prefix === prev.prefix
226
+ ? TOKEN_OVERLAP_ISSUE_PREFIX_THRESHOLD
227
+ : TOKEN_OVERLAP_THRESHOLD;
228
+ if (overlap > threshold) {
229
+ isDuplicate = true;
230
+ break;
231
+ }
232
+ }
233
+ }
234
+ if (isDuplicate)
133
235
  continue;
134
236
  seenIds.add(thread.id);
135
- seenText.add(key);
237
+ accepted.push({ text, tokens, prefix });
136
238
  result.push(thread);
137
239
  }
138
240
  return result;
@@ -56,8 +56,10 @@ export function normalizeThreads(raw, sourceSession) {
56
56
  id: parsed.id,
57
57
  text: parsed.note,
58
58
  status: parsed.status,
59
+ // Preserve existing created_at — only default to now() for genuinely new threads
59
60
  created_at: parsed.created_at || new Date().toISOString(),
60
- ...(sourceSession && { source_session: sourceSession }),
61
+ ...(sourceSession && !parsed.source_session && { source_session: sourceSession }),
62
+ ...(parsed.source_session && { source_session: parsed.source_session }),
61
63
  ...(parsed.resolved_at && { resolved_at: parsed.resolved_at }),
62
64
  };
63
65
  }
@@ -88,8 +90,9 @@ export function normalizeThreads(raw, sourceSession) {
88
90
  id: inner.id,
89
91
  text: inner.text || inner.note,
90
92
  status: inner.status || item.status,
93
+ // Preserve the earliest available created_at — never overwrite with now()
91
94
  created_at: inner.created_at || item.created_at || new Date().toISOString(),
92
- ...(sourceSession && { source_session: sourceSession }),
95
+ ...(sourceSession && !item.source_session && { source_session: sourceSession }),
93
96
  ...(inner.resolved_at && { resolved_at: inner.resolved_at }),
94
97
  };
95
98
  }
@@ -15,7 +15,7 @@
15
15
  * N_A — Scar doesn't apply, scenario comparison required
16
16
  * REFUTED — Overriding scar, risk acknowledgment required
17
17
  */
18
- import { getCurrentSession, getSurfacedScars, addConfirmations, } from "../services/session-state.js";
18
+ import { getCurrentSession, getSurfacedScars, addConfirmations, getConfirmations, } from "../services/session-state.js";
19
19
  import { Timer, buildPerformanceData } from "../services/metrics.js";
20
20
  import { getSessionPath } from "../services/gitmem-dir.js";
21
21
  import { wrapDisplay } from "../services/display-protocol.js";
@@ -23,7 +23,8 @@ import * as fs from "fs";
23
23
  // Minimum evidence length per decision type
24
24
  const MIN_EVIDENCE_LENGTH = 50;
25
25
  // Future-tense patterns — APPLYING must use past tense
26
- const FUTURE_PATTERNS = /\b(will|going to|plan to|intend to|I'll|we'll|shall|about to|aim to|expect to)\b/i;
26
+ // Only catch first-person forward-looking language, not third-person "will"
27
+ const FUTURE_PATTERNS = /\b(I will|I'll|we will|we'll|I'm going to|we're going to|I plan to|I intend to|I shall|I aim to|I expect to)\b/i;
27
28
  /**
28
29
  * Validate a single confirmation against its surfaced scar.
29
30
  * Returns null if valid, or an error string if invalid.
@@ -192,9 +193,11 @@ export async function confirmScars(params) {
192
193
  }
193
194
  }
194
195
  // Check for missing scars (all recall scars must be addressed)
196
+ // Credit scars already confirmed in a previous call this session
197
+ const previouslyConfirmedIds = new Set(getConfirmations().map(c => c.scar_id));
195
198
  const missingScars = [];
196
199
  for (const scar of recallScars) {
197
- if (!confirmedIds.has(scar.scar_id)) {
200
+ if (!confirmedIds.has(scar.scar_id) && !previouslyConfirmedIds.has(scar.scar_id)) {
198
201
  missingScars.push(scar.scar_title);
199
202
  }
200
203
  }
@@ -33,7 +33,7 @@ export interface CreateThreadResult {
33
33
  deduplicated?: boolean;
34
34
  /** Phase 3: dedup gate details */
35
35
  dedup?: {
36
- method: "embedding" | "text_normalization" | "skipped";
36
+ method: "embedding" | "token_overlap" | "text_normalization" | "skipped";
37
37
  similarity: number | null;
38
38
  matched_thread_id: string | null;
39
39
  };
@@ -172,7 +172,9 @@ export async function recall(params) {
172
172
  const matchCount = params.match_count || 3;
173
173
  const issueId = params.issue_id; // For variant assignment
174
174
  // Similarity threshold — suppress weak matches
175
- const defaultThreshold = hasSupabase() ? 0.35 : 0.4;
175
+ // Pro tier: 0.45 calibrated from UX audit (66% N_A rate at 0.35, APPLYING avg 0.55, N_A avg 0.51)
176
+ // Free tier: 0.4 (BM25 scores are relative — top result always 1.0)
177
+ const defaultThreshold = hasSupabase() ? 0.45 : 0.4;
176
178
  const similarityThreshold = params.similarity_threshold ?? defaultThreshold;
177
179
  // Free tier: use local keyword search
178
180
  if (!hasSupabase()) {
@@ -1,21 +1,23 @@
1
1
  #!/bin/bash
2
2
  # GitMem Hooks Plugin — PreToolUse Hook (Recall Check + Confirmation Gate)
3
3
  #
4
- # Two enforcement mechanisms for consequential actions:
4
+ # Two enforcement mechanisms with different trigger scopes:
5
5
  #
6
- # 1. CONFIRMATION GATE (hard block):
6
+ # 1. CONFIRMATION GATE (hard block, consequential actions only):
7
7
  # If recall() surfaced scars but confirm_scars() hasn't been called → BLOCK.
8
8
  # Uses JSON "decision: block" pattern (same as session-close-check.sh).
9
9
  # Only blocks on recall-source scars; session_start scars don't require confirmation.
10
+ # Consequential = git push, npm publish, deploy, .sql, .env files.
10
11
  #
11
- # 2. RECALL NAG (soft reminder):
12
- # If recall hasn't been called recently nudge (additionalContext, never blocks).
13
- # - If recall never called AND >3 tool calls → nag
14
- # - Cooldown: no more than once per 60 seconds
12
+ # 2. RECALL NAG (soft reminder, ALL Bash/Write/Edit actions):
13
+ # If recall hasn't been called AND agent has made 10+ tool calls → nudge.
14
+ # Never blocks — just injects additionalContext.
15
+ # Cooldown: no more than once per 90 seconds.
16
+ # NOTE: hooks.json matchers already limit this to Bash/Write/Edit.
15
17
  #
16
- # Filter layer: Only triggers on consequential actions:
17
- # - Bash: git push, git tag, npm publish, deploy commands
18
- # - Write/Edit: .sql migrations, .env files
18
+ # UX audit finding: 12 sessions >30min with zero recalls because the nag was
19
+ # gated behind the consequential filter — agents writing .ts files, running
20
+ # tests, etc. were never nudged. Now the nag fires for all code changes.
19
21
  #
20
22
  # Input: JSON via stdin with tool_name and tool_input
21
23
  # Output: JSON with decision:block OR additionalContext OR empty (exit 0)
@@ -88,7 +90,35 @@ read_session_count() {
88
90
  TOOL_NAME=$(parse_json "$HOOK_INPUT" ".tool_name")
89
91
 
90
92
  # ============================================================================
91
- # Filter layer: Is this a consequential action?
93
+ # Read session state (shared by both gate and nag)
94
+ # ============================================================================
95
+
96
+ RECALL_SCAR_COUNT=$(read_session_count \
97
+ '[.surfaced_scars // [] | .[] | select(.source == "recall")] | length' \
98
+ "const fs=require('fs');try{const s=JSON.parse(fs.readFileSync('$SESSION_FILE','utf8'));const c=(s.surfaced_scars||[]).filter(x=>x.source==='recall');process.stdout.write(String(c.length))}catch(e){process.stdout.write('0')}")
99
+
100
+ CONFIRMATION_COUNT=$(read_session_count \
101
+ '[.confirmations // [] | .[]] | length' \
102
+ "const fs=require('fs');try{const s=JSON.parse(fs.readFileSync('$SESSION_FILE','utf8'));process.stdout.write(String((s.confirmations||[]).length))}catch(e){process.stdout.write('0')}")
103
+
104
+ # ============================================================================
105
+ # Session state tracking (for nag logic — runs for ALL matched tool calls)
106
+ # ============================================================================
107
+
108
+ SESSION_ID="${CLAUDE_SESSION_ID:-$$}"
109
+ STATE_DIR="/tmp/gitmem-hooks-${SESSION_ID}"
110
+ mkdir -p "$STATE_DIR"
111
+
112
+ # Increment tool call count (counts all Bash/Write/Edit, not just consequential)
113
+ TOOL_COUNT=0
114
+ if [ -f "$STATE_DIR/tool_call_count" ]; then
115
+ TOOL_COUNT=$(cat "$STATE_DIR/tool_call_count")
116
+ fi
117
+ TOOL_COUNT=$((TOOL_COUNT + 1))
118
+ echo "$TOOL_COUNT" > "$STATE_DIR/tool_call_count"
119
+
120
+ # ============================================================================
121
+ # Filter layer: Is this a consequential action? (used for gate only)
92
122
  # ============================================================================
93
123
 
94
124
  IS_CONSEQUENTIAL=false
@@ -111,26 +141,13 @@ case "$TOOL_NAME" in
111
141
  ;;
112
142
  esac
113
143
 
114
- # Not consequential → pass through silently
115
- if [ "$IS_CONSEQUENTIAL" != "true" ]; then
116
- exit 0
117
- fi
118
-
119
144
  # ============================================================================
120
- # CONFIRMATION GATE (runs first — hard block takes priority over soft nag)
145
+ # CONFIRMATION GATE (hard block consequential actions only)
121
146
  # ============================================================================
122
147
  # Block if recall() surfaced scars but confirm_scars() hasn't been called.
123
148
  # Only blocks on recall-source scars; session_start scars don't require confirmation.
124
149
 
125
- RECALL_SCAR_COUNT=$(read_session_count \
126
- '[.surfaced_scars // [] | .[] | select(.source == "recall")] | length' \
127
- "const fs=require('fs');try{const s=JSON.parse(fs.readFileSync('$SESSION_FILE','utf8'));const c=(s.surfaced_scars||[]).filter(x=>x.source==='recall');process.stdout.write(String(c.length))}catch(e){process.stdout.write('0')}")
128
-
129
- CONFIRMATION_COUNT=$(read_session_count \
130
- '[.confirmations // [] | .[]] | length' \
131
- "const fs=require('fs');try{const s=JSON.parse(fs.readFileSync('$SESSION_FILE','utf8'));process.stdout.write(String((s.confirmations||[]).length))}catch(e){process.stdout.write('0')}")
132
-
133
- if [ "$RECALL_SCAR_COUNT" -gt 0 ] 2>/dev/null && [ "$CONFIRMATION_COUNT" -eq 0 ] 2>/dev/null; then
150
+ if [ "$IS_CONSEQUENTIAL" = "true" ] && [ "$RECALL_SCAR_COUNT" -gt 0 ] 2>/dev/null && [ "$CONFIRMATION_COUNT" -eq 0 ] 2>/dev/null; then
134
151
  # Get scar titles for the error message
135
152
  if command -v jq &>/dev/null; then
136
153
  SCAR_TITLES=$(jq -r '[.surfaced_scars // [] | .[] | select(.source == "recall") | .scar_title] | join(", ")' "$SESSION_FILE" 2>/dev/null || echo "(unknown)")
@@ -152,24 +169,11 @@ HOOKJSON
152
169
  fi
153
170
 
154
171
  # ============================================================================
155
- # Session state tracking (for nag logic)
156
- # ============================================================================
157
-
158
- SESSION_ID="${CLAUDE_SESSION_ID:-$$}"
159
- STATE_DIR="/tmp/gitmem-hooks-${SESSION_ID}"
160
- mkdir -p "$STATE_DIR"
161
-
162
- # Increment tool call count
163
- TOOL_COUNT=0
164
- if [ -f "$STATE_DIR/tool_call_count" ]; then
165
- TOOL_COUNT=$(cat "$STATE_DIR/tool_call_count")
166
- fi
167
- TOOL_COUNT=$((TOOL_COUNT + 1))
168
- echo "$TOOL_COUNT" > "$STATE_DIR/tool_call_count"
169
-
170
- # ============================================================================
171
- # Cooldown check: don't nag more than once per 60 seconds
172
+ # RECALL NAG (soft reminder — ALL Bash/Write/Edit, not just consequential)
172
173
  # ============================================================================
174
+ # UX audit: 12 sessions >30min with zero recalls because nag was gated behind
175
+ # consequential filter. Now fires for any code change after 10+ tool calls.
176
+ # Cooldown: 90s between nags. Never blocks — additionalContext only.
173
177
 
174
178
  NOW=$(date +%s)
175
179
  LAST_NAG=0
@@ -178,21 +182,16 @@ if [ -f "$STATE_DIR/last_nag_time" ]; then
178
182
  fi
179
183
 
180
184
  ELAPSED_SINCE_NAG=$((NOW - LAST_NAG))
181
- if [ "$ELAPSED_SINCE_NAG" -lt 60 ]; then
185
+ if [ "$ELAPSED_SINCE_NAG" -lt 90 ]; then
182
186
  exit 0
183
187
  fi
184
188
 
185
- # ============================================================================
186
- # RECALL NAG: Nudge if recall hasn't been called
187
- # ============================================================================
188
- # Check if any recall-source scars exist. If RECALL_SCAR_COUNT is 0 and
189
- # we've had >3 tool calls, the agent hasn't called recall at all → nag.
190
-
191
189
  SHOULD_NAG=false
192
190
 
193
191
  if [ "$RECALL_SCAR_COUNT" -eq 0 ] 2>/dev/null; then
194
- # No recall scars found — recall probably wasn't called
195
- if [ "$TOOL_COUNT" -gt 3 ]; then
192
+ # No recall scars found — recall hasn't been called this session
193
+ # Threshold: 10 tool calls to avoid nagging during initial exploration
194
+ if [ "$TOOL_COUNT" -gt 10 ]; then
196
195
  SHOULD_NAG=true
197
196
  fi
198
197
  fi
@@ -205,7 +204,7 @@ if [ "$SHOULD_NAG" = "true" ]; then
205
204
  echo "$NOW" > "$STATE_DIR/last_nag_time"
206
205
  cat <<'HOOKJSON'
207
206
  {
208
- "additionalContext": "GITMEM RECALL REMINDER: You're about to take a consequential action but haven't checked institutional memory recently. Consider calling `recall` (or `gitmem-r`) with your plan before proceeding. This surfaces relevant scars that may prevent repeating past mistakes."
207
  "additionalContext": "GITMEM RECALL REMINDER: You've been working for a while without checking institutional memory. Consider calling `recall` (or `gitmem-r`) with your current plan — it surfaces relevant lessons from past sessions that may save time or prevent mistakes. Example: recall({ plan: \"what I'm about to do\" })"
209
208
  }
210
209
  HOOKJSON
211
210
  fi
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "gitmem-mcp",
3
- "version": "1.0.14",
3
+ "version": "1.1.0",
4
4
  "description": "Institutional memory for AI coding agents. Memory that compounds.",
5
5
  "type": "module",
6
6
  "main": "dist/index.js",
@@ -55,6 +55,7 @@
55
55
  "!hooks/tests",
56
56
  "schema",
57
57
  "CLAUDE.md.template",
58
+ "cursorrules.template",
58
59
  "README.md",
59
60
  "CHANGELOG.md"
60
61
  ],