meridian-dev 1.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/BOOTSTRAP_PROMPT.md +110 -0
- package/README.md +344 -0
- package/backup/hooks/session-end.sh +44 -0
- package/backup/hooks/session-start.sh +37 -0
- package/backup/setup.sh +156 -0
- package/bin/meridian.js +100 -0
- package/doctor.sh +173 -0
- package/install.sh +62 -0
- package/journal-summary.sh +577 -0
- package/package.json +42 -0
- package/setup.sh +407 -0
- package/specializations/claude-code/CLAUDE.md-global-fragment.md +52 -0
- package/specializations/claude-code/CLAUDE.md-repo-fragment.md +16 -0
- package/specializations/claude-code/README.md +96 -0
- package/specializations/claude-code/commands/doctor.md +31 -0
- package/specializations/claude-code/commands/init-memory.md +127 -0
- package/specializations/claude-code/commands/init-team.md +335 -0
- package/specializations/claude-code/commands/journal.md +66 -0
- package/specializations/claude-code/hooks/check-global-state.sh +68 -0
- package/specializations/claude-code/settings.json +10 -0
- package/specializations/cursor/README.md +112 -0
- package/specializations/cursor/global-rule.mdc +53 -0
- package/specializations/cursor/repo-rule.mdc +25 -0
- package/specializations/generic/README.md +47 -0
- package/templates/global.md +73 -0
- package/templates/memory-file.md +18 -0
- package/templates/personal-state.md +14 -0
- package/templates/product-state.md +39 -0
- package/templates/repo-state.md +18 -0
- package/templates/session-protocol-fragment.md +46 -0
- package/templates/strategy-state.md +37 -0
- package/templates/team-state.md +29 -0
- package/uninstall.sh +85 -0
|
@@ -0,0 +1,577 @@
|
|
|
1
|
+
#!/usr/bin/env bash
# Skip Tissue — Journal Summary
# Aggregates structured AI session journal entries into weekly digests.
# Surfaces drift patterns, recurring lessons, and per-repo summaries.
#
# Journal entries are generated by Claude Code / Cursor at the end of each
# session in ~/.claude/memory/journal/YYYY-MM-DD.md (or ~/.ai-memory/).
#
# Usage:
#   journal-summary.sh                                    # This week (auto-detect dir)
#   journal-summary.sh --last-week                        # Last week
#   journal-summary.sh --from 2026-02-01 --to 2026-02-28
#   journal-summary.sh --team ~/team-journals             # Aggregate across team members
#   journal-summary.sh --all --format markdown            # Full history as Markdown
#   journal-summary.sh --dir ~/.ai-memory/memory/journal  # Cursor install path

set -euo pipefail

# ── Defaults ──────────────────────────────────────────────────────────────────

JOURNAL_DIR=""      # single-user journal directory (auto-detected when empty)
TEAM_DIR=""         # team-mode root: one subdirectory per contributor
FORMAT="text"       # output format: "text" or "markdown"
RANGE_MODE="week"   # week | last-week | range | all
FROM_DATE=""        # inclusive range start (YYYY-MM-DD)
TO_DATE=""          # inclusive range end (YYYY-MM-DD)

# ── Helpers ───────────────────────────────────────────────────────────────────

GREEN='\033[0;32m'; YELLOW='\033[1;33m'; RESET='\033[0m'
info() { echo -e " $1"; }
warn() { echo -e "${YELLOW}⚠${RESET} $1" >&2; }
|
|
34
|
+
# Print the full help text to stdout.
usage() {
  cat <<USAGE
Usage: journal-summary.sh [OPTIONS]

Aggregates AI session journal entries into weekly digests with drift
detection and recurring lesson extraction.

Options:
  --dir <path>         Journal directory (default: auto-detect ~/.claude or ~/.ai-memory)
  --team <path>        Team mode: each subdirectory is a user name containing journal files
  --week               Summarize current week Mon–Sun (default)
  --last-week          Summarize last week Mon–Sun
  --from <YYYY-MM-DD>  Start date (inclusive)
  --to <YYYY-MM-DD>    End date (inclusive, default: today)
  --all                Include all available journal files
  --format text        Output as plain text (default)
  --format markdown    Output as Markdown (suitable for Notion, GitHub, Slack)
  --help               Show this message

Examples:
  journal-summary.sh
  journal-summary.sh --last-week
  journal-summary.sh --from 2026-02-01 --to 2026-02-28
  journal-summary.sh --team ~/team-journals --format markdown
  journal-summary.sh --all --format markdown > weekly-report.md

Journal format expected (auto-generated by Skip Tissue session hooks):
  ## Repo — Session title
  **Why:** Stated goal
  **What:** What was actually done
  **Outcome:** Did we hit the goal?
  **On track?:** Yes / No — reason for drift
  **Lessons:** What to remember next time
USAGE
}
+
|
|
70
|
+
# ── Argument parsing ──────────────────────────────────────────────────────────
# Every value-taking option accepts both "--opt value" and "--opt=value".
# Fixes:
#  * --to now selects RANGE_MODE="range" (like --from). Previously a lone
#    --to was silently ignored and the "--from required with --to" error
#    in the range case was unreachable.
#  * ${2:?...} turns a missing option value into a clear error instead of
#    a bare "unbound variable" crash under set -u.

while [[ $# -gt 0 ]]; do
  case "$1" in
    --dir)       JOURNAL_DIR="${2:?--dir requires a value}"; shift 2 ;;
    --dir=*)     JOURNAL_DIR="${1#--dir=}"; shift ;;
    --team)      TEAM_DIR="${2:?--team requires a value}"; shift 2 ;;
    --team=*)    TEAM_DIR="${1#--team=}"; shift ;;
    --format)    FORMAT="${2:?--format requires a value}"; shift 2 ;;
    --format=*)  FORMAT="${1#--format=}"; shift ;;
    --week)      RANGE_MODE="week"; shift ;;
    --last-week) RANGE_MODE="last-week"; shift ;;
    --all)       RANGE_MODE="all"; shift ;;
    --from)      FROM_DATE="${2:?--from requires a value}"; RANGE_MODE="range"; shift 2 ;;
    --from=*)    FROM_DATE="${1#--from=}"; RANGE_MODE="range"; shift ;;
    --to)        TO_DATE="${2:?--to requires a value}"; RANGE_MODE="range"; shift 2 ;;
    --to=*)      TO_DATE="${1#--to=}"; RANGE_MODE="range"; shift ;;
    --help)      usage; exit 0 ;;
    *)           echo "Unknown option: $1" >&2; usage >&2; exit 1 ;;
  esac
done

# Validate the requested output format early, before any work is done.
if [ "$FORMAT" != "text" ] && [ "$FORMAT" != "markdown" ]; then
  echo "Error: --format must be 'text' or 'markdown'" >&2
  exit 1
fi
|
|
97
|
+
# ── Auto-detect journal directory ─────────────────────────────────────────────
# Prefer the Claude Code install location, then the generic/Cursor one.

if [ -z "$JOURNAL_DIR" ] && [ -z "$TEAM_DIR" ]; then
  for candidate in "$HOME/.claude/memory/journal" "$HOME/.ai-memory/memory/journal"; do
    if [ -d "$candidate" ]; then
      JOURNAL_DIR="$candidate"
      break
    fi
  done
  if [ -z "$JOURNAL_DIR" ]; then
    echo "Error: no journal directory found." >&2
    echo "Expected: ~/.claude/memory/journal or ~/.ai-memory/memory/journal" >&2
    echo "Specify one with --dir <path> or run: journal-summary.sh --help" >&2
    exit 1
  fi
fi
112
|
+
# ── Date range calculation ────────────────────────────────────────────────────
# Each helper tries GNU date (-d) first and falls back to BSD date (-v/-j/-f)
# so the script runs on both Linux and macOS. Failures are silenced and the
# fallback form is attempted instead.

TODAY=$(date +%Y-%m-%d)

date_offset() {
  # Portable date arithmetic: returns the date N days from today.
  # Works on macOS (BSD date) and Linux (GNU date).
  # NOTE(review): not called anywhere in this script at present.
  local n="$1"
  if date -v "${n}d" +%Y-%m-%d 2>/dev/null; then return; fi
  date -d "${n} days" +%Y-%m-%d 2>/dev/null
}

week_start() {
  # Print the Monday (ISO weekday 1) of the week containing date $1.
  local d="$1"
  local dow
  if dow=$(date -d "$d" +%u 2>/dev/null); then :; else
    # macOS BSD date; default to Monday if even that fails.
    dow=$(date -j -f "%Y-%m-%d" "$d" +%u 2>/dev/null || echo "1")
  fi
  # Walk backwards (dow - 1) days to land on Monday.
  local offset=$(( -(dow - 1) ))
  if date -d "$d $offset days" +%Y-%m-%d 2>/dev/null; then return; fi
  date -v "${offset}d" -j -f "%Y-%m-%d" "$d" +%Y-%m-%d 2>/dev/null
}

week_end() {
  # Print the Sunday of the week that starts on Monday $1 (start + 6 days).
  local start="$1"
  local offset=6
  if date -d "$start $offset days" +%Y-%m-%d 2>/dev/null; then return; fi
  date -v "${offset}d" -j -f "%Y-%m-%d" "$start" +%Y-%m-%d 2>/dev/null
}
|
143
|
+
|
|
144
|
+
# Resolve RANGE_MODE into a concrete inclusive [FROM_DATE, TO_DATE] window.
# For --all, sentinel bounds cover every possible YYYY-MM-DD filename.
case "$RANGE_MODE" in
  week)
    FROM_DATE=$(week_start "$TODAY")
    TO_DATE=$(week_end "$FROM_DATE")
    ;;
  last-week)
    # Monday of this week, minus seven days.
    local_start=$(week_start "$TODAY")
    if MON_LAST=$(date -d "$local_start -7 days" +%Y-%m-%d 2>/dev/null); then :; else
      MON_LAST=$(date -v-7d -j -f "%Y-%m-%d" "$local_start" +%Y-%m-%d 2>/dev/null)
    fi
    FROM_DATE="$MON_LAST"
    TO_DATE=$(week_end "$FROM_DATE")
    ;;
  range)
    [ -z "$FROM_DATE" ] && { echo "Error: --from required with --to" >&2; exit 1; }
    [ -z "$TO_DATE" ] && TO_DATE="$TODAY"
    ;;
  all)
    FROM_DATE="0000-01-01"
    TO_DATE="9999-12-31"
    ;;
esac
|
+
|
|
167
|
+
# ── Collect journal files ─────────────────────────────────────────────────────

collect_files() {
  # Emit one "user|date|path" record per journal file whose YYYY-MM-DD.md
  # basename lies inside [FROM_DATE, TO_DATE]. Lexicographic comparison is
  # safe because ISO dates sort chronologically.
  local dir="$1"
  local user="${2:-}" # optional user label for team mode
  local f base
  while IFS= read -r f; do
    base=$(basename "$f" .md)
    [[ "$base" =~ ^[0-9]{4}-[0-9]{2}-[0-9]{2}$ ]] || continue
    if [[ "$base" < "$FROM_DATE" ]]; then continue; fi
    if [[ "$base" > "$TO_DATE" ]]; then continue; fi
    echo "$user|$base|$f"
  done < <(find "$dir" -maxdepth 1 -name '*.md' 2>/dev/null | sort)
}
|
184
|
+
|
|
185
|
+
FILE_RECORDS=() # each element: "user|date|filepath"

if [ -n "$TEAM_DIR" ]; then
  # Team mode: every first-level subdirectory of TEAM_DIR is one contributor.
  while IFS= read -r subdir; do
    user=$(basename "$subdir")
    # A contributor may keep .md files directly in their folder or in journal/.
    journal_subdir="$subdir"
    if [ -d "$subdir/journal" ]; then
      journal_subdir="$subdir/journal"
    fi
    while IFS= read -r rec; do
      FILE_RECORDS+=("$rec")
    done < <(collect_files "$journal_subdir" "$user")
  done < <(find "$TEAM_DIR" -mindepth 1 -maxdepth 1 -type d 2>/dev/null | sort)
else
  # Single-user mode: no user label on the records.
  while IFS= read -r rec; do
    FILE_RECORDS+=("$rec")
  done < <(collect_files "$JOURNAL_DIR" "")
fi

# Nothing to summarize is a normal outcome, not an error.
if [ ${#FILE_RECORDS[@]} -eq 0 ]; then
  echo "No journal files found for the date range: $FROM_DATE → $TO_DATE"
  echo "Journal directory: ${JOURNAL_DIR:-$TEAM_DIR}"
  exit 0
fi
+
|
|
216
|
+
# ── Python parser and report generator ────────────────────────────────────────
# The remainder of the work runs in one python3 process: argv[1..3] carry the
# output format and date bounds, argv[4..] the collected "user|date|path"
# records. The quoted heredoc delimiter prevents shell expansion inside.

python3 - "$FORMAT" "$FROM_DATE" "$TO_DATE" "${FILE_RECORDS[@]}" <<'PYEOF'
import sys
import re
from collections import defaultdict

format_md = (sys.argv[1] == "markdown")  # True → Markdown output helpers
from_date = sys.argv[2]                  # inclusive range start
to_date = sys.argv[3]                    # inclusive range end
records = sys.argv[4:]                   # "user|date|filepath"
|
|
228
|
+
|
|
229
|
+
# ── Entry dataclass ────────────────────────────────────────────────────────────

class Entry:
    """One parsed journal session: header metadata, field text, drift flag."""
    __slots__ = ['date', 'user', 'repo', 'title',
                 'why', 'what', 'outcome', 'ontrack', 'lessons', 'discovery', 'drifted']

    def __init__(self):
        # Initialize every slot to the empty string, then give the single
        # boolean slot its proper type.
        for name in self.__slots__:
            setattr(self, name, '')
        self.drifted = False
238
|
+
|
|
239
|
+
|
|
240
|
+
# ── Parser ─────────────────────────────────────────────────────────────────────

# Maps each literal bold field label to the Entry attribute it fills.
FIELD_MAP = {
    '**Why:**': 'why',
    '**What:**': 'what',
    '**Outcome:**': 'outcome',
    '**On track?:**': 'ontrack',
    '**Lessons:**': 'lessons',
    '**Discovery:**': 'discovery',
}

# Sort by longest label first to avoid prefix collisions
FIELD_LABELS = sorted(FIELD_MAP.keys(), key=len, reverse=True)

# A session header is any "## " heading that is not a bare date or "Journal".
ENTRY_HEADER = re.compile(r'^## (?!\d{4}-\d{2}-\d{2}|Journal\b)(.+)')

# An unrecognized bold label line terminates the currently open field.
# Fix: accept the colon either inside the bold span ('**Mood:**' — the style
# FIELD_MAP itself uses) or after it ('**Mood**:'). Previously only the
# latter matched, so unknown labels written in the journal's own style were
# appended to the previous field's text instead of ending it.
BOLD_LABEL = re.compile(r'^\*\*[^*]+:\*\*|^\*\*[^*]+\*\*:')
+
|
|
257
|
+
|
|
258
|
+
def parse_file(path, date, user):
    """Parse one journal file into a list of Entry objects.

    A file holds one or more sessions, each introduced by an
    '## Repo — Title' heading followed by '**Field:** value' lines; a
    field's value may continue over subsequent non-empty lines until the
    next labelled line or header.
    """
    entries = []
    current = None   # Entry being filled, or None before the first header
    field = None     # attribute currently receiving continuation lines

    try:
        with open(path, encoding='utf-8', errors='replace') as fh:
            for raw in fh:
                line = raw.rstrip('\n\r')

                header = ENTRY_HEADER.match(line)
                if header:
                    # New session: keep the previous one only if it named a repo.
                    if current and current.repo:
                        entries.append(current)
                    current = Entry()
                    current.date = date
                    current.user = user
                    text = header.group(1).strip()
                    # Split "Repo — Title" on the first spaced dash
                    # (em-dash, en-dash, or plain hyphen, in that order).
                    for sep in [' \u2014 ', ' \u2013 ', ' - ']:
                        idx = text.find(sep)
                        if idx >= 0:
                            current.repo = text[:idx].strip()
                            current.title = text[idx + len(sep):].strip()
                            break
                    else:
                        current.repo = text
                        current.title = ''
                    field = None
                    continue

                if current is None:
                    continue

                # Does this line open one of the known fields?
                label = next((lb for lb in FIELD_LABELS if line.startswith(lb)), None)
                if label is not None:
                    field = FIELD_MAP[label]
                    setattr(current, field, line[len(label):].lstrip())
                    continue

                # Any other bold label ends the current field.
                if BOLD_LABEL.match(line):
                    field = None
                    continue

                # Non-empty line while a field is open: continuation text.
                if field and line.strip():
                    existing = getattr(current, field)
                    setattr(current, field,
                            (existing + '\n' + line) if existing else line)

    except (IOError, OSError):
        # Unreadable file → contribute no entries rather than crash the report.
        pass

    if current and current.repo:
        entries.append(current)

    return entries
+
|
|
324
|
+
|
|
325
|
+
# ── Drift detection ────────────────────────────────────────────────────────────

# Phrases in an "On track?" answer that signal goal drift.
DRIFT_RE = re.compile(
    r'\bdrift(?:ed)?\b'
    r'|\boff.track\b'
    r'|\bscope.creep\b'
    r'|\bwent.sideways\b'
    r'|\btangent\b',
    re.IGNORECASE
)
# Negation patterns that cancel a nearby drift signal.
# Fix: the original had the alternative n\'t duplicated — the second copy was
# clearly intended to be the typographic apostrophe form ("didn’t"), so
# curly-quoted negations were not recognized.
NEGATED_DRIFT_RE = re.compile(
    r'\bno\s+drift\b'
    r'|\bzero\s+drift\b'
    r'|\bdid(?:n\'t|n’t| not)\s+drift\b'
    r'|\bnot\s+drift\b',
    re.IGNORECASE
)
# Answers beginning with "yes" default to on-track.
YES_START_RE = re.compile(r'^yes\b', re.IGNORECASE)
|
344
|
+
|
|
345
|
+
|
|
346
|
+
def is_drifted(text):
    """Decide whether an 'On track?' answer signals drift.

    Negated phrases ('no drift', "didn't drift", ...) are stripped first so
    they cannot masquerade as drift signals. An answer that opens with
    'yes' is treated as on-track unless a genuine signal remains.
    """
    answer = text.strip()
    if not answer:
        return False
    # Remove negated drift phrases before checking for drift signals.
    cleaned = NEGATED_DRIFT_RE.sub('', answer)
    signal = DRIFT_RE.search(cleaned)
    # Starts with "yes" and no remaining drift signal → on track.
    if YES_START_RE.match(answer) and not signal:
        return False
    return bool(signal)
356
|
+
|
|
357
|
+
|
|
358
|
+
# ── Lesson deduplication ───────────────────────────────────────────────────────

def extract_lesson_bullets(text):
    """Return a list of individual lesson strings (bullet or full text).

    Leading bullet markers are stripped; fragments of 10 characters or
    fewer are discarded as noise.
    """
    candidates = (ln.strip().lstrip('- •').strip() for ln in text.splitlines())
    return [ln for ln in candidates if len(ln) > 10]
368
|
+
|
|
369
|
+
|
|
370
|
+
def find_recurring(all_lesson_lines, min_count=2):
    """Return lesson strings that appear ≥ min_count times (exact match).

    Matching is case-insensitive and whitespace-trimmed; the returned keys
    are the normalized (lowercased, stripped) forms, in first-seen order.
    """
    tally = {}
    for raw in all_lesson_lines:
        key = raw.lower().strip()
        tally[key] = tally.get(key, 0) + 1
    return [lesson for lesson, hits in tally.items() if hits >= min_count]
|
376
|
+
|
|
377
|
+
|
|
378
|
+
# ── Load all entries ───────────────────────────────────────────────────────────

all_entries = []

# Each record is "user|date|filepath"; malformed records are skipped.
for rec in records:
    parts = rec.split('|', 2)
    if len(parts) != 3:
        continue
    rec_user, rec_date, rec_path = parts
    all_entries.extend(parse_file(rec_path, rec_date, rec_user))

# Classify drift once per entry from its "On track?" answer.
for entry in all_entries:
    entry.drifted = is_drifted(entry.ontrack)

# Deterministic ordering: by date, then contributor, then repo.
all_entries.sort(key=lambda e: (e.date, e.user, e.repo))
|
+
|
|
396
|
+
|
|
397
|
+
# ── Output helpers ─────────────────────────────────────────────────────────────
# Each helper renders Markdown when the module-level format_md flag is set,
# otherwise a plain-text equivalent.

def h1(text):
    if format_md:
        return f'# {text}'
    bar = '=' * len(text)
    return f'\n{bar}\n{text}\n{bar}'

def h2(text):
    if format_md:
        return f'\n## {text}'
    return f'\n{text}\n{"-" * len(text)}'

def h3(text):
    return f'\n### {text}' if format_md else f'\n {text}:'

def bullet(text, indent=0):
    pad = ' ' * indent
    marker = '-' if format_md else '•'
    return f'{pad}{marker} {text}'

def divider():
    return '---' if format_md else '-' * 60

def bold(text):
    return f'**{text}**' if format_md else text.upper()

def italic(text):
    return f'_{text}_' if format_md else f'({text})'

def truncate(text, max_len=120):
    """Flatten newlines and clip to max_len with a trailing ellipsis."""
    flat = text.replace('\n', ' ').strip()
    if len(flat) > max_len:
        return flat[:max_len - 3] + '...'
    return flat
|
432
|
+
|
|
433
|
+
|
|
434
|
+
# ── Build report ───────────────────────────────────────────────────────────────

lines = []

# Header: show the concrete range, or "All time" for the --all sentinel.
range_label = f'{from_date} → {to_date}' if from_date != '0000-01-01' else 'All time'
lines.append(h1(f'AI Session Journal Summary · {range_label}'))

total = len(all_entries)
drifted = [e for e in all_entries if e.drifted]
repos = sorted(set(e.repo for e in all_entries))
users = sorted(set(e.user for e in all_entries if e.user))

# Stats line: sessions / (contributors) / repos / drift count with percentage.
drift_pct = f'{100 * len(drifted) // total}%' if total else '—'
stats_parts = [
    f'{total} session{"s" if total != 1 else ""}',
    f'{len(repos)} repo{"s" if len(repos) != 1 else ""}',
    f'{len(drifted)} drift{"s" if len(drifted) != 1 else ""} ({drift_pct})',
]
if users:
    stats_parts.insert(1, f'{len(users)} contributor{"s" if len(users) != 1 else ""}')

lines.append('')
lines.append(italic(' '.join(stats_parts)))

# ── Per-contributor section (team mode only) ─────────────────────────────────
if users:
    lines.append(h2('Contributors'))
    for member in users:
        member_entries = [e for e in all_entries if e.user == member]
        member_drifted = [e for e in member_entries if e.drifted]
        member_repos = sorted(set(e.repo for e in member_entries))
        lines.append(bullet(
            f'{bold(member)} — {len(member_entries)} sessions, '
            f'{len(member_repos)} repos, '
            f'{len(member_drifted)} drift'
        ))

# ── Sessions by repo ─────────────────────────────────────────────────────────
lines.append(h2('Sessions by Repo'))

for repo in repos:
    repo_entries = [e for e in all_entries if e.repo == repo]
    repo_drift_count = sum(1 for e in repo_entries if e.drifted)

    drift_note = f' {italic(f"{repo_drift_count} drifted")}' if repo_drift_count else ''
    lines.append(h3(f'{repo}{drift_note}'))

    for entry in repo_entries:
        # "date [user] ⚠" prefix; user and warning marker are optional.
        prefix = entry.date
        if entry.user:
            prefix += f' [{entry.user}]'
        if entry.drifted:
            prefix += ' ⚠'

        title_str = f' — {entry.title}' if entry.title else ''
        lines.append(bullet(f'{prefix}{title_str}', indent=1))

        if entry.why:
            lines.append(bullet(f'{bold("Goal:")} {truncate(entry.why)}', indent=2))
        if entry.outcome:
            lines.append(bullet(f'{bold("Outcome:")} {truncate(entry.outcome)}', indent=2))
        if entry.discovery:
            lines.append(bullet(f'{bold("Discovery:")} {truncate(entry.discovery)}', indent=2))
        if entry.drifted and entry.ontrack:
            lines.append(bullet(f'{bold("Drift:")} {truncate(entry.ontrack)}', indent=2))

# ── Discovery log (feeds PM and CTO digests) ────────────────────────────────
discoveries = [e for e in all_entries if e.discovery.strip()]
if discoveries:
    lines.append(h2(f'Discoveries ({len(discoveries)} session{"s" if len(discoveries) != 1 else ""})'))
    lines.append('')
    lines.append('Things learned that challenge assumptions or need PM/CTO attention:')
    lines.append('')
    for entry in discoveries:
        who = f' [{entry.user}]' if entry.user else ''
        title_str = f' — {entry.title}' if entry.title else ''
        lines.append(bullet(f'{entry.date}{who} {entry.repo}{title_str}'))
        lines.append(bullet(f'{truncate(entry.discovery, 200)}', indent=1))
else:
    lines.append(h2('Discoveries'))
    lines.append(italic('No discoveries flagged in this period.'))

# ── Drift log ────────────────────────────────────────────────────────────────
if drifted:
    lines.append(h2(f'Drift Log ({len(drifted)} session{"s" if len(drifted) != 1 else ""})'))
    lines.append('')
    lines.append('Sessions where "On track?" indicated scope creep or goal drift:')
    lines.append('')
    for entry in drifted:
        who = f' [{entry.user}]' if entry.user else ''
        lines.append(bullet(f'{entry.date}{who} {entry.repo} — {entry.title}'))
        lines.append(bullet(f'{truncate(entry.ontrack)}', indent=1))
else:
    lines.append(h2('Drift Log'))
    lines.append(italic('No drift detected in this period.'))

# ── Recurring lessons ────────────────────────────────────────────────────────
all_lesson_lines = []
for entry in all_entries:
    if entry.lessons:
        all_lesson_lines.extend(extract_lesson_bullets(entry.lessons))

recurring = find_recurring(all_lesson_lines, min_count=2)

lines.append(h2('Recurring Lessons'))
if recurring:
    lines.append('')
    lines.append('Lessons that appeared in multiple sessions (worth adding to CLAUDE.md or global-state.md):')
    lines.append('')
    for lesson in sorted(recurring):
        lines.append(bullet(lesson))
else:
    lines.append(italic('No recurring lessons found (each lesson appeared only once).'))

# ── All lessons ──────────────────────────────────────────────────────────────
lines.append(h2('All Lessons'))
recurring_keys = {r.lower() for r in recurring}  # hoisted: invariant across the loop
seen_lessons = set()
for entry in sorted(all_entries, key=lambda x: x.date):
    if not entry.lessons:
        continue
    bullets_for_entry = extract_lesson_bullets(entry.lessons)
    if not bullets_for_entry:
        continue
    who = f' [{entry.user}]' if entry.user else ''
    lines.append(h3(f'{entry.date}{who} {entry.repo}'))
    for lesson_text in bullets_for_entry:
        key = lesson_text.lower().strip()
        # ♻ marks lessons that also appear in the Recurring Lessons section.
        marker = ' ♻' if key in recurring_keys else ''
        lines.append(bullet(f'{lesson_text}{marker}'))
        seen_lessons.add(key)

if not seen_lessons:
    lines.append(italic('No lessons recorded in this period.'))

# ── Footer ───────────────────────────────────────────────────────────────────
lines.append('')
lines.append(divider())
lines.append(italic('Generated by Skip Tissue journal-summary.sh'))

print('\n'.join(lines))

PYEOF
package/package.json
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "meridian-dev",
|
|
3
|
+
"version": "1.1.0",
|
|
4
|
+
"description": "Team decision trail for AI-assisted development. The connective tissue between product, engineering, and strategy.",
|
|
5
|
+
"bin": {
|
|
6
|
+
"meridian": "bin/meridian.js"
|
|
7
|
+
},
|
|
8
|
+
"keywords": [
|
|
9
|
+
"ai",
|
|
10
|
+
"claude",
|
|
11
|
+
"cursor",
|
|
12
|
+
"memory",
|
|
13
|
+
"context",
|
|
14
|
+
"team",
|
|
15
|
+
"engineering",
|
|
16
|
+
"product",
|
|
17
|
+
"decision-trail",
|
|
18
|
+
"digest",
|
|
19
|
+
"drift-detection"
|
|
20
|
+
],
|
|
21
|
+
"author": "Greg Leizerowicz",
|
|
22
|
+
"license": "MIT",
|
|
23
|
+
"repository": {
|
|
24
|
+
"type": "git",
|
|
25
|
+
"url": "https://github.com/leizerowicz/skip-tissue"
|
|
26
|
+
},
|
|
27
|
+
"files": [
|
|
28
|
+
"bin/",
|
|
29
|
+
"setup.sh",
|
|
30
|
+
"install.sh",
|
|
31
|
+
"uninstall.sh",
|
|
32
|
+
"doctor.sh",
|
|
33
|
+
"journal-summary.sh",
|
|
34
|
+
"templates/",
|
|
35
|
+
"specializations/",
|
|
36
|
+
"backup/",
|
|
37
|
+
"BOOTSTRAP_PROMPT.md"
|
|
38
|
+
],
|
|
39
|
+
"engines": {
|
|
40
|
+
"node": ">=16"
|
|
41
|
+
}
|
|
42
|
+
}
|