@unlaxer/dve-toolkit 4.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,101 @@
1
#!/usr/bin/env bash
# audit-duplicates.sh — Detect self-implemented features that DxE toolkit already provides
#
# Usage:
#   bash dve/kit/scripts/audit-duplicates.sh [project-dir]
#   dve scan [dir] --audit

set -euo pipefail

PROJECT_DIR="${1:-.}"
# Resolve to an absolute path. The original suppressed cd's stderr, so with
# set -e a bad path made the script die silently; fail loudly instead.
PROJECT_DIR="$(cd "$PROJECT_DIR" 2>/dev/null && pwd)" || {
  echo "Error: cannot access project directory: ${1:-.}" >&2
  exit 1
}
PROJECT_NAME="$(basename "$PROJECT_DIR")"

echo "🔍 Audit: ${PROJECT_NAME}"
echo ""

FINDINGS=0  # total number of audit findings, reported in the summary

# ─── Known DxE capabilities vs self-implementations ───

declare -A CAPABILITIES
# '|'-separated `find -name` globs → DxE toolkit that provides the capability
CAPABILITIES=(
  ["*glossary*linker*|*GlossaryLinker*|*auto-link*"]="DDE dde-link (npx dde-link --fix)"
  ["*state-machine*|*state_machine*|*workflow-engine*"]="DRE workflow engine (dre-engine init)"
  ["*gap-extract*|*gap_extract*|*design-review*"]="DGE session (dge-session skill)"
  ["*decision-vis*|*decision_vis*"]="DVE (dve build + dve serve)"
)

for patterns in "${!CAPABILITIES[@]}"; do
  TOOLKIT="${CAPABILITIES[$patterns]}"
  IFS='|' read -ra PATS <<< "$patterns"

  for pat in "${PATS[@]}"; do
    # Search for files matching the pattern (excluding node_modules, .git, dge/, dre/, dve/, dde/).
    # `|| true`: with `set -o pipefail`, `head -3` closing the pipe early can make
    # find exit 141 (SIGPIPE), which would otherwise kill the script under set -e.
    FOUND=$(find "$PROJECT_DIR" -path "*/node_modules" -prune -o -path "*/.git" -prune -o \
      -path "*/dge" -prune -o -path "*/dre" -prune -o -path "*/dve" -prune -o -path "*/dde" -prune -o \
      -name "$pat" -print 2>/dev/null | head -3) || true

    if [ -n "$FOUND" ]; then
      echo " ⚠️ Self-implementation detected:"
      # Pipe-fed subshell loop is fine here: it only prints, no variables escape.
      echo "$FOUND" | while read -r f; do
        echo " ${f#$PROJECT_DIR/}"
      done
      echo " → Already available: ${TOOLKIT}"
      echo ""
      FINDINGS=$((FINDINGS + 1))
    fi
  done
done
51

# ─── Check for outdated toolkit versions ───

# Compare a project-local toolkit version file against the kit's version file.
# Globals:   FINDINGS (incremented when an update is available)
# Arguments: $1 - toolkit display name (e.g. "DGE")
#            $2 - path to the project-local version file
#            $3 - path to the kit's version file
# Outputs:   update hint on stdout when versions differ; nothing otherwise
check_version() {
  local TOOLKIT_NAME="$1"
  local LOCAL_VERSION_FILE="$2"
  local KIT_VERSION_FILE="$3"
  local LOCAL_V KIT_V

  if [ -f "$LOCAL_VERSION_FILE" ] && [ -f "$KIT_VERSION_FILE" ]; then
    # Read each file directly (no useless cat) and strip all whitespace,
    # trailing newline included.
    LOCAL_V=$(tr -d '[:space:]' < "$LOCAL_VERSION_FILE" 2>/dev/null)
    KIT_V=$(tr -d '[:space:]' < "$KIT_VERSION_FILE" 2>/dev/null)
    if [ "$LOCAL_V" != "$KIT_V" ] && [ -n "$LOCAL_V" ] && [ -n "$KIT_V" ]; then
      echo " 📦 ${TOOLKIT_NAME}: ${LOCAL_V} → ${KIT_V} available"
      echo " Run: dxe update ${TOOLKIT_NAME,,}"
      echo ""
      FINDINGS=$((FINDINGS + 1))
    fi
  fi
}
70

# Check known toolkit locations
DXE_HOME="${DXE_HOME:-$HOME/work/AskOS-workspace/DxE-suite}"
check_version "DGE" "$PROJECT_DIR/dge/version.txt" "$DXE_HOME/dge/kit/version.txt"
check_version "DRE" "$PROJECT_DIR/.claude/.dre-version" "$DXE_HOME/dre/kit/version.txt"

# ─── Check for unused DxE features ───

# DDE linker available but not used (no dde-link in recent git log)
if [ -d "$PROJECT_DIR/docs/glossary" ]; then
  # Directory test instead of parsing `ls` output — a present (non-empty in
  # practice) npm package directory is what the original head -1 check tested.
  if [ -d "$PROJECT_DIR/node_modules/@unlaxer/dde-toolkit" ]; then
    # grep -c prints "0" and exits 1 when nothing matches; || true keeps the 0.
    RECENT_DDE_LINK=$(git -C "$PROJECT_DIR" log --oneline -20 2>/dev/null | grep -c "dde-link" || true)
    if [ "$RECENT_DDE_LINK" -eq 0 ]; then
      echo " 💡 DDE linker is installed but hasn't been used recently"
      # find instead of ls: robust against odd filenames, same count.
      echo " docs/glossary/ exists with $(find "$PROJECT_DIR/docs/glossary" -maxdepth 1 -name '*.md' 2>/dev/null | wc -l) articles"
      echo " Run: npx dde-link --check README.md"
      echo ""
      FINDINGS=$((FINDINGS + 1))
    fi
  fi
fi

# ─── Summary ───

if [ "$FINDINGS" -eq 0 ]; then
  echo " ✅ No duplicates or unused features found."
else
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
  echo " ${FINDINGS} finding(s)"
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
fi
@@ -0,0 +1,129 @@
1
#!/usr/bin/env bash
# discover-decisions.sh — Scan Claude Code logs for implicit decisions
# Detects: 1. Decision patterns in assistant text
#          2. User approval patterns (よろしく/GO/OK/うん/やろう) after proposals
#
# Usage:
#   bash dve/kit/scripts/discover-decisions.sh [project-dir]
#   bash dve/kit/scripts/discover-decisions.sh [project-dir] --apply

set -euo pipefail

PROJECT_DIR="${1:-.}"
# Fail loudly on an unreachable directory instead of dying silently under set -e.
PROJECT_DIR="$(cd "$PROJECT_DIR" 2>/dev/null && pwd)" || {
  echo "Error: cannot access project directory: ${1:-.}" >&2
  exit 1
}
PROJECT_NAME="$(basename "$PROJECT_DIR")"
CLAUDE_LOGS_DIR="$HOME/.claude/projects"
DRY_RUN=true  # default: report only; --apply writes the DD draft

for arg in "$@"; do [ "$arg" = "--apply" ] && DRY_RUN=false; done

# Claude Code encodes a project path by replacing every '/' with '-'.
PATH_ENCODED=$(echo "$PROJECT_DIR" | sed 's|/|-|g')
LOG_DIR=""
for d in "$CLAUDE_LOGS_DIR"/*/; do
  [ -d "$d" ] || continue
  [ "$(basename "$d")" = "$PATH_ENCODED" ] && LOG_DIR="$d" && break
done

[ -z "$LOG_DIR" ] && echo "No logs for ${PROJECT_NAME}" && exit 0

echo "🔍 Discovering decisions in: ${PROJECT_NAME}"
echo " Mode: $([ "$DRY_RUN" = true ] && echo "DRY RUN" || echo "APPLY")"
echo ""

TMPDIR=$(mktemp -d)
# Single quotes: "$TMPDIR" expands when the trap fires, and a temp path with
# spaces cannot break the cleanup (original expanded it unquoted at trap time).
trap 'rm -rf "$TMPDIR"' EXIT

# BUG FIX: the pre-created approvals file must match the name the scan loop
# appends to ("user_approvals.txt"); the original touched "approvals.txt",
# leaving the real file nonexistent when no approvals matched.
touch "$TMPDIR/user_approvals.txt" "$TMPDIR/explicit.txt"
37

# Scan every session log once, harvesting two kinds of signal into temp files:
#   explicit.txt       — assistant sentences that state a decision outright
#   user_approvals.txt — short user replies approving a preceding proposal
for log_file in "$LOG_DIR"/*.jsonl; do
  [ -f "$log_file" ] || continue

  # User approval → assistant proposal pairs
  # (よろしく/GO/OK/うん/やろう etc. imply the preceding assistant text was a decision)
  grep '"type":"user"' "$log_file" 2>/dev/null \
    | jq -r '.message.content[]? | select(.type=="text") | .text' 2>/dev/null \
    | grep -iE "^よろしく|^よろ$|^GO$|^OK|^ok$|^うん|^いいね|^やろう|^いこう|^進めて|^頼む|^push|^deploy|^実装して|^やって|^やる$" \
    >> "$TMPDIR/user_approvals.txt" 2>/dev/null || true

  # Explicit decision phrasing in assistant text, whitespace-trimmed and
  # length/noise-filtered before collection.
  grep '"type":"assistant"' "$log_file" 2>/dev/null \
    | jq -r '.message.content[]? | select(.type=="text") | .text' 2>/dev/null \
    | grep -iE "にしよう|に決定|で行く|を採用|を選択|で確定|却下|方針:|決定:|確定:|Direction:|Decision:|Decided:|settled on" 2>/dev/null \
    | sed 's/^[[:space:]]*//' \
    | awk 'length >= 15 && length <= 300 && !/^[{}\[\]<>]/ && !/^import / && !/→ Gap 発見/' \
    >> "$TMPDIR/explicit.txt" 2>/dev/null || true
done

APPROVAL_COUNT=$(wc -l < "$TMPDIR/user_approvals.txt" 2>/dev/null || echo 0)

# Deduplicate explicit decisions
sort -u "$TMPDIR/explicit.txt" > "$TMPDIR/explicit_uniq.txt"
EXPLICIT_COUNT=$(wc -l < "$TMPDIR/explicit_uniq.txt" 2>/dev/null || echo 0)

TOTAL=$((EXPLICIT_COUNT + APPROVAL_COUNT))
65

# ─── Report ───
echo " Explicit decision patterns: ${EXPLICIT_COUNT}"
echo " User approval patterns: ${APPROVAL_COUNT}"
echo " Total: ${TOTAL}"
echo ""

# Nothing discovered: stop here.
if [ "$TOTAL" -eq 0 ]; then
  echo " No decisions found."
  exit 0
fi

echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo " Discovered Decisions"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""

# List up to 30 of the deduplicated explicit decisions.
idx=0
while IFS= read -r entry; do
  idx=$((idx + 1))
  if [ "$idx" -gt 30 ]; then
    echo " ... (${EXPLICIT_COUNT} total, showing first 30)"
    break
  fi
  echo " 📝 ${idx}. ${entry}"
done < "$TMPDIR/explicit_uniq.txt"

# Top-10 most frequent approval phrases.
if [ "$APPROVAL_COUNT" -gt 0 ]; then
  echo ""
  echo " --- User Approvals (${APPROVAL_COUNT} times) ---"
  sort "$TMPDIR/user_approvals.txt" | uniq -c | sort -rn | head -10 | while read -r count text; do
    echo " 👍 ${text} (×${count})"
  done
fi

echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"

99
if [ "$DRY_RUN" = false ]; then
  DD_DIR="${PROJECT_DIR}/dge/decisions"
  mkdir -p "$DD_DIR"
  # Highest existing DD number (e.g. DD-012-foo.md → "012"); "0" when none.
  # NOTE(review): grep -oP is GNU-specific — confirm macOS/BSD is out of scope.
  LAST_DD=$(find "$DD_DIR" -name "DD-*.md" 2>/dev/null | grep -oP 'DD-\d+' | sort -t- -k2 -n | tail -1 | grep -oP '\d+' || echo 0)
  # BUG FIX: force base 10. Zero-padded numbers like "008" are otherwise
  # parsed as octal by $((...)) and abort the script ("value too great for base").
  NEXT_DD=$((10#$LAST_DD + 1))
  DD_NUM=$(printf "%03d" "$NEXT_DD")
  DD_FILE="${DD_DIR}/DD-${DD_NUM}-discovered.md"

  # Emit a draft DD document listing up to 30 discovered decisions for review.
  {
    echo "# DD-${DD_NUM}: Discovered Decisions"
    echo ""
    echo "- **Date**: $(date +%Y-%m-%d)"
    echo "- **Status**: draft"
    echo "- **Source**: discover-decisions.sh"
    echo ""
    echo "## Review each item: Keep / Merge / Reject"
    echo ""
    INDEX=0
    while IFS= read -r line; do
      INDEX=$((INDEX + 1))
      [ $INDEX -gt 30 ] && break
      echo "### ${INDEX}. ${line}"
      echo "- Action: TODO"
      echo ""
    done < "$TMPDIR/explicit_uniq.txt"
  } > "$DD_FILE"
  echo " Created: ${DD_FILE}"
fi

echo ""
# BUG FIX: written as `if` so an --apply run exits 0. The original trailing
# `[ ... ] && echo` made the whole script exit with status 1 when DRY_RUN=false.
if [ "$DRY_RUN" = true ]; then
  echo " Add --apply to create DD draft."
fi
@@ -0,0 +1,150 @@
1
#!/usr/bin/env bash
# recover-all.sh — Recover ALL DxE artifacts from Claude Code logs
# Extracts Write tool calls for: sessions, decisions, specs, annotations
#
# Usage:
#   bash dve/kit/scripts/recover-all.sh [project-dir]          # dry run
#   bash dve/kit/scripts/recover-all.sh [project-dir] --apply  # write files
#   bash dve/kit/scripts/recover-all.sh --scan-all             # all projects

set -euo pipefail

CLAUDE_LOGS_DIR="$HOME/.claude/projects"  # Claude Code per-project log root
DRY_RUN=true    # --apply switches to writing files
SCAN_ALL=false  # --scan-all recovers every project found in the logs

# Flags may appear at any argument position.
for arg in "$@"; do
  case "$arg" in
    --apply) DRY_RUN=false ;;
    --scan-all) SCAN_ALL=true ;;
  esac
done

# First positional argument is the project dir unless it is one of the flags.
PROJECT_DIR="${1:-.}"
case "$PROJECT_DIR" in
  --scan-all|--apply) PROJECT_DIR="." ;;
esac
# Best-effort absolute path; fall back to the literal value when cd fails.
PROJECT_DIR="$(cd "$PROJECT_DIR" 2>/dev/null && pwd || echo "$PROJECT_DIR")"

echo "🔧 DxE Artifact Recovery"
echo " Mode: $([ "$DRY_RUN" = true ] && echo "DRY RUN" || echo "APPLY")"
echo ""
30
# Recover DxE artifact files (dge/sessions, dge/decisions, dge/specs,
# dve/annotations, dve/contexts) for one project by replaying the Write tool
# calls recorded in its Claude Code logs.
# Globals:   CLAUDE_LOGS_DIR (read), DRY_RUN (read)
# Arguments: $1 - absolute path of the project directory
# Outputs:   per-file NEW/UPDATE report and a totals line on stdout
# Returns:   0 (also when no logs / no Write calls exist for the project)
recover_project() {
  local PROJ_DIR="$1"
  local PROJ_NAME="$(basename "$PROJ_DIR")"

  # Claude Code names the log directory after the project path with '/' → '-'.
  local PATH_ENCODED=$(echo "$PROJ_DIR" | sed 's|/|-|g')
  local LOG_DIR=""
  for d in "$CLAUDE_LOGS_DIR"/*/; do
    [ -d "$d" ] || continue
    [ "$(basename "$d")" = "$PATH_ENCODED" ] && LOG_DIR="$d" && break
  done
  # No matching log directory: nothing to recover for this project.
  [ -z "$LOG_DIR" ] && return 0

  local LOG_COUNT=$(find "$LOG_DIR" -maxdepth 1 -name "*.jsonl" 2>/dev/null | wc -l)
  [ "$LOG_COUNT" -eq 0 ] && return 0

  echo "┌─ ${PROJ_NAME} (${LOG_COUNT} logs)"

  # NOTE(review): `local` confines the TMPDIR override to this function, but
  # while it is set, child processes that honor $TMPDIR will use it too.
  # There is no trap here: if a command aborts mid-function the dir leaks.
  local TMPDIR=$(mktemp -d)

  # Step 1: Extract all Write tool calls to temp files
  # One JSON object per line: {path: <file written>, content: <file body>}.
  for LOGFILE in "$LOG_DIR"/*.jsonl; do
    [ -f "$LOGFILE" ] || continue
    grep '"type":"assistant"' "$LOGFILE" 2>/dev/null | \
      jq -c '.message.content[]? | select(.type=="tool_use" and .name=="Write") | {path: .input.file_path, content: .input.content}' 2>/dev/null \
      >> "$TMPDIR/writes.jsonl" || true
  done

  [ ! -f "$TMPDIR/writes.jsonl" ] && echo "│ No Write calls found" && echo "└──────────────────────────────" && rm -rf "$TMPDIR" && return 0

  local TOTAL=$(wc -l < "$TMPDIR/writes.jsonl")
  local RECOVERED=0
  local UPDATED=0
  local SKIPPED=0

  # Step 2: Process each write
  while IFS= read -r line; do
    local FILE_PATH=$(echo "$line" | jq -r '.path // empty' 2>/dev/null)
    # Relies on left-associativity: ( -z || = null ) && continue.
    [ -z "$FILE_PATH" ] || [ "$FILE_PATH" = "null" ] && continue

    # Only DxE files
    case "$FILE_PATH" in
      *dge/sessions/*|*dge/decisions/*|*dge/specs/*|*dve/annotations/*|*dve/contexts/*) ;;
      *) continue ;;
    esac

    # Get relative path
    local REL_PATH=""
    if [[ "$FILE_PATH" == "$PROJ_DIR"* ]]; then
      REL_PATH="${FILE_PATH#$PROJ_DIR/}"
    else
      # Path recorded under another root: keep everything from dge/ or dve/ down.
      # NOTE(review): grep -oP is GNU-only; confirm BSD/macOS is out of scope.
      REL_PATH=$(echo "$FILE_PATH" | grep -oP "(dge|dve)/.*" || true)
      [ -z "$REL_PATH" ] && continue
    fi

    local TARGET="${PROJ_DIR}/${REL_PATH}"

    # Extract content to temp file (avoid shell variable size limits)
    echo "$line" | jq -r '.content // empty' 2>/dev/null > "$TMPDIR/content.tmp"
    local LOG_LINES=$(wc -l < "$TMPDIR/content.tmp")
    # Skip trivially small payloads (likely empty/placeholder writes).
    [ "$LOG_LINES" -lt 3 ] && continue

    # Heuristic "dialogue richness" score: count of scene/speaker/emoji marker lines.
    local LOG_HAS_DIALOGUE=$(grep -cE "Scene|先輩|ナレーション|☕|👤|🎩|😰|⚔|🎨|📊" "$TMPDIR/content.tmp" 2>/dev/null || true)

    if [ ! -f "$TARGET" ]; then
      # Target missing on disk: recover it from the log.
      echo "│ ✅ [NEW] ${REL_PATH} (${LOG_LINES} lines)"
      if [ "$DRY_RUN" = false ]; then
        mkdir -p "$(dirname "$TARGET")"
        cp "$TMPDIR/content.tmp" "$TARGET"
      fi
      RECOVERED=$((RECOVERED + 1))
    else
      local EXISTING_LINES=$(wc -l < "$TARGET")
      local HAS_DIALOGUE=$(grep -cE "Scene|先輩|ナレーション|☕|👤|🎩|😰|⚔|🎨|📊" "$TARGET" 2>/dev/null || true)

      # Update if log version is significantly larger and has more dialogue
      # (>= 21 extra lines AND a strictly higher dialogue-marker count).
      if [ "$LOG_LINES" -gt "$((EXISTING_LINES + 20))" ] && [ "$LOG_HAS_DIALOGUE" -gt "$HAS_DIALOGUE" ]; then
        echo "│ 🔄 [UPDATE] ${REL_PATH} (${EXISTING_LINES}→${LOG_LINES} lines, dialogue: ${HAS_DIALOGUE}→${LOG_HAS_DIALOGUE})"
        if [ "$DRY_RUN" = false ]; then
          # Keep a .bak of the on-disk version before overwriting.
          cp "$TARGET" "${TARGET}.bak"
          cp "$TMPDIR/content.tmp" "$TARGET"
        fi
        UPDATED=$((UPDATED + 1))
      else
        SKIPPED=$((SKIPPED + 1))
      fi
    fi
  done < "$TMPDIR/writes.jsonl"

  echo "│ Total writes: ${TOTAL} | Recovered: ${RECOVERED} | Updated: ${UPDATED} | Skipped: ${SKIPPED}"
  echo "└──────────────────────────────"

  rm -rf "$TMPDIR"
}
123
+
124
+ if [ "$SCAN_ALL" = true ]; then
125
+ # Discover project paths from Write tool calls in logs
126
+ DISCOVERED_PATHS=$(mktemp)
127
+ for LOG_DIR in "$CLAUDE_LOGS_DIR"/*/; do
128
+ [ -d "$LOG_DIR" ] || continue
129
+ for f in "$LOG_DIR"*.jsonl; do
130
+ [ -f "$f" ] || continue
131
+ grep '"type":"assistant"' "$f" 2>/dev/null | \
132
+ jq -r '.message.content[]? | select(.type=="tool_use" and .name=="Write") | .input.file_path // empty' 2>/dev/null | \
133
+ grep -E "dge/|dve/" | \
134
+ sed 's|/dge/.*||; s|/dve/.*||' | \
135
+ sort -u >> "$DISCOVERED_PATHS" 2>/dev/null || true
136
+ done
137
+ done
138
+ sort -u "$DISCOVERED_PATHS" | while IFS= read -r PROJ_PATH; do
139
+ [ -z "$PROJ_PATH" ] && continue
140
+ [ -d "$PROJ_PATH" ] && recover_project "$PROJ_PATH" || true
141
+ done
142
+ rm -f "$DISCOVERED_PATHS"
143
+ else
144
+ recover_project "$PROJECT_DIR"
145
+ fi
146
+
147
+ echo ""
148
+ if [ "$DRY_RUN" = true ]; then
149
+ echo "Dry run complete. Add --apply to write files."
150
+ fi
@@ -0,0 +1,190 @@
1
#!/usr/bin/env bash
# recover-dialogues.sh — Extract DGE dialogues from Claude Code logs
# and merge them into existing session files that lack dialogue content.
#
# Usage:
#   bash dve/kit/scripts/recover-dialogues.sh [project-dir]
#   bash dve/kit/scripts/recover-dialogues.sh /path/to/volta-platform
#
# What it does:
#   1. Find Claude Code logs for the project
#   2. Extract assistant text blocks containing DGE dialogue markers
#   3. Match with existing session files by theme/date
#   4. Create enriched session files with dialogue included
#
# Dry run (default): shows what would be done. Add --apply to write files.

set -euo pipefail

PROJECT_DIR="${1:-.}"
PROJECT_DIR="$(cd "$PROJECT_DIR" && pwd)"
PROJECT_NAME="$(basename "$PROJECT_DIR")"
DRY_RUN=true

for arg in "$@"; do
  case "$arg" in
    --apply) DRY_RUN=false ;;
  esac
done

SESSIONS_DIR="${PROJECT_DIR}/dge/sessions"
CLAUDE_LOGS_DIR="$HOME/.claude/projects"

# A sessions directory is required — there is nothing to enrich without one.
if [ ! -d "$SESSIONS_DIR" ]; then
  echo "Error: $SESSIONS_DIR not found."
  exit 1
fi

echo "🔍 Recovering DGE dialogues for: ${PROJECT_NAME}"
echo " Sessions: ${SESSIONS_DIR}"
echo " Mode: $([ "$DRY_RUN" = true ] && echo "DRY RUN" || echo "APPLY")"
echo ""

# Locate this project's Claude Code log directory.
# Claude Code uses the path with dashes, e.g. -home-opa-work-AskOS-workspace-volta-platform
PROJECT_PATH_ENCODED=$(echo "$PROJECT_DIR" | sed 's|/|-|g')
LOG_DIR=""
for candidate in "$CLAUDE_LOGS_DIR"/*; do
  [ -d "$candidate" ] || continue
  if [ "$(basename "$candidate")" = "$PROJECT_PATH_ENCODED" ]; then
    LOG_DIR="$candidate"
    break
  fi
done

if [ -z "$LOG_DIR" ]; then
  echo " No Claude Code logs found for ${PROJECT_NAME}"
  echo " Expected: ${CLAUDE_LOGS_DIR}/${PROJECT_PATH_ENCODED}"
  exit 0
fi

LOG_COUNT=$(find "$LOG_DIR" -maxdepth 1 -name "*.jsonl" | wc -l)
echo " Found ${LOG_COUNT} log files in ${LOG_DIR}"
echo ""
63

# Extract all DGE dialogue blocks from logs
TMPDIR=$(mktemp -d)
# Single quotes: "$TMPDIR" expands when the trap fires, and a temp path with
# spaces cannot break the cleanup (original expanded it unquoted at trap time).
trap 'rm -rf "$TMPDIR"' EXIT

echo " Extracting DGE dialogues from logs..."

for LOGFILE in "$LOG_DIR"/*.jsonl; do
  [ -f "$LOGFILE" ] || continue
  BASENAME=$(basename "$LOGFILE" .jsonl)

  # Pass 1: assistant text starting at an explicit DGE session/scene heading.
  # Once a heading is seen, everything to the end of the stream is kept —
  # `found` is never reset. TODO(review): confirm that is intended; the
  # original contained a dead `/^---$/ ... { }` rule (empty action) and a
  # duplicated `^##.*Scene [0-9]` alternative, both removed here (no-ops).
  grep '"type":"assistant"' "$LOGFILE" 2>/dev/null | \
    jq -r '.message.content[] | select(.type=="text") | .text' 2>/dev/null | \
    awk '
      /^#.*DGE Session|^##.*Scene [0-9]/ { found=1 }
      found { print }
    ' > "$TMPDIR/${BASENAME}.txt" 2>/dev/null || true

  # Pass 2: buffer the whole text and keep it only when it contains BOTH a
  # Gap marker and character-dialogue markers (speaker names / emoji).
  grep '"type":"assistant"' "$LOGFILE" 2>/dev/null | \
    jq -r '.message.content[] | select(.type=="text") | .text' 2>/dev/null | \
    awk '
      /→.*Gap 発見|Gap 発見:/ { gap++ }
      /先輩.*ナレーション|☕|👤|🎩|😰|⚔|🎨|📊|🏥|😈/ { char++ }
      { lines[NR] = $0 }
      END {
        if (gap > 0 && char > 0) {
          for (i=1; i<=NR; i++) print lines[i]
        }
      }
    ' >> "$TMPDIR/${BASENAME}.txt" 2>/dev/null || true
done

# Count log files that yielded more than 100 bytes of dialogue
EXTRACTED=$(find "$TMPDIR" -name "*.txt" -size +100c | wc -l)
echo " Extracted dialogue from ${EXTRACTED} log files"
echo ""
102

# Match extracted dialogues against session files that lack dialogue content
RECOVERED=0
SKIPPED=0

for SESSION_FILE in "$SESSIONS_DIR"/*.md; do
  [ -f "$SESSION_FILE" ] || continue
  FNAME="$(basename "$SESSION_FILE")"

  # Skip sessions that already carry dialogue markers.
  HAS_DIALOGUE=$(grep -cE "Scene|先輩|ナレーション|☕|👤|🎩" "$SESSION_FILE" 2>/dev/null || true)
  if [ "$HAS_DIALOGUE" -gt 2 ]; then
    continue # Already has dialogue
  fi

  # Extract theme (text after a ":"/":" on the first theme line) and the
  # leading YYYY-MM-DD date from the filename, if any.
  THEME=$(grep -m1 "テーマ\|theme\|Theme" "$SESSION_FILE" 2>/dev/null | sed 's/.*[::] *//' | head -1)
  DATE=$(echo "$FNAME" | grep -oP '^\d{4}-\d{2}-\d{2}' || true)

  if [ -z "$THEME" ] && [ -z "$DATE" ]; then
    continue
  fi

  # Score every extracted dialogue file; keep the best match.
  BEST_MATCH=""
  BEST_SCORE=0

  for EXTRACTED_FILE in "$TMPDIR"/*.txt; do
    [ -s "$EXTRACTED_FILE" ] || continue
    SCORE=0

    # Match by theme keywords
    if [ -n "$THEME" ]; then
      # NOTE(review): tr is byte-oriented — the multibyte separators here
      # (full-width space, ・) are handled per byte and may mangle UTF-8
      # keywords; verify against real session themes.
      KEYWORDS=$(echo "$THEME" | tr '  /・' '\n' | head -5)
      for KW in $KEYWORDS; do
        [ "${#KW}" -lt 2 ] && continue
        # BUG FIX (-F --): keywords are literal text, not regexes — a theme
        # containing '(', '[' or '*' previously broke or skewed the count.
        KW_COUNT=$(grep -ciF -- "$KW" "$EXTRACTED_FILE" 2>/dev/null || true)
        SCORE=$((SCORE + KW_COUNT))
      done
    fi

    # Match by date (strong signal, fixed-string match)
    if [ -n "$DATE" ] && grep -qF -- "$DATE" "$EXTRACTED_FILE" 2>/dev/null; then
      SCORE=$((SCORE + 10))
    fi

    if [ "$SCORE" -gt "$BEST_SCORE" ]; then
      BEST_SCORE=$SCORE
      BEST_MATCH="$EXTRACTED_FILE"
    fi
  done

  # Require a minimum score so weak keyword noise does not trigger a merge.
  if [ "$BEST_SCORE" -gt 3 ] && [ -n "$BEST_MATCH" ]; then
    DIALOGUE_LINES=$(wc -l < "$BEST_MATCH")
    echo " ✅ ${FNAME}"
    echo " Theme: ${THEME:-N/A} | Score: ${BEST_SCORE} | Lines: ${DIALOGUE_LINES}"

    if [ "$DRY_RUN" = false ]; then
      # Backup original
      cp "$SESSION_FILE" "${SESSION_FILE}.bak"

      # Append dialogue section
      {
        echo ""
        echo "---"
        echo ""
        echo "## 会話劇(ログから復元)"
        echo ""
        cat "$BEST_MATCH"
      } >> "$SESSION_FILE"

      echo " → Appended to ${SESSION_FILE}"
    fi
    RECOVERED=$((RECOVERED + 1))
  else
    SKIPPED=$((SKIPPED + 1))
  fi
done
180

# ─── Summary ───
SUMMARY_BAR="━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
echo "$SUMMARY_BAR"
echo " Recovered: ${RECOVERED}"
echo " Skipped: ${SKIPPED} (no matching dialogue found)"
if [ "$DRY_RUN" = true ] && [ "$RECOVERED" -gt 0 ]; then
  echo ""
  echo " Dry run complete. Add --apply to write files:"
  echo " bash dve/kit/scripts/recover-dialogues.sh ${PROJECT_DIR} --apply"
fi
echo "$SUMMARY_BAR"