agileflow 2.43.0 → 2.45.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,123 @@
+ #!/bin/bash
+ #
+ # AgileFlow PreCompact Hook
+ # Outputs critical context that should survive conversation compaction.
+ #
+
+ # Get current version from package.json
+ VERSION=$(node -p "require('./package.json').version" 2>/dev/null || echo "unknown")
+
+ # Get current git branch
+ BRANCH=$(git branch --show-current 2>/dev/null || echo "unknown")
+
+ # Get current stories from status.json
+ CURRENT_STORY=""
+ WIP_COUNT=0
+ if [ -f "docs/09-agents/status.json" ]; then
+   CURRENT_STORY=$(node -p "
+     const s = require('./docs/09-agents/status.json');
+     const stories = Object.entries(s.stories || {})
+       .filter(([,v]) => v.status === 'in_progress')
+       .map(([k,v]) => k + ': ' + v.title)
+       .join(', ');
+     stories || 'None in progress';
+   " 2>/dev/null || echo "Unable to read")
+
+   WIP_COUNT=$(node -p "
+     const s = require('./docs/09-agents/status.json');
+     Object.values(s.stories || {}).filter(v => v.status === 'in_progress').length;
+   " 2>/dev/null || echo "0")
+ fi
+
+ # Get practices list
+ PRACTICES=""
+ if [ -d "docs/02-practices" ]; then
+   PRACTICES=$(ls docs/02-practices/*.md 2>/dev/null | head -8 | xargs -I {} basename {} .md | tr '\n' ',' | sed 's/,$//')
+ fi
+
+ # Get active epics
+ EPICS=""
+ if [ -d "docs/05-epics" ]; then
+   EPICS=$(ls docs/05-epics/ 2>/dev/null | head -5 | tr '\n' ',' | sed 's/,$//')
+ fi
+
+ # Detect active commands and extract their Compact Summaries
+ COMMAND_SUMMARIES=""
+ if [ -f "docs/09-agents/session-state.json" ]; then
+   ACTIVE_COMMANDS=$(node -p "
+     const s = require('./docs/09-agents/session-state.json');
+     (s.active_commands || []).map(c => c.name).join(' ');
+   " 2>/dev/null || echo "")
+
+   for ACTIVE_COMMAND in $ACTIVE_COMMANDS; do
+     [ -z "$ACTIVE_COMMAND" ] && continue
+
+     COMMAND_FILE=""
+     if [ -f "packages/cli/src/core/commands/${ACTIVE_COMMAND}.md" ]; then
+       COMMAND_FILE="packages/cli/src/core/commands/${ACTIVE_COMMAND}.md"
+     elif [ -f ".agileflow/commands/${ACTIVE_COMMAND}.md" ]; then
+       COMMAND_FILE=".agileflow/commands/${ACTIVE_COMMAND}.md"
+     elif [ -f ".claude/commands/agileflow/${ACTIVE_COMMAND}.md" ]; then
+       COMMAND_FILE=".claude/commands/agileflow/${ACTIVE_COMMAND}.md"
+     fi
+
+     if [ -n "$COMMAND_FILE" ]; then
+       SUMMARY=$(node -e "
+         const fs = require('fs');
+         const content = fs.readFileSync('$COMMAND_FILE', 'utf8');
+         const match = content.match(/<!-- COMPACT_SUMMARY_START[\\s\\S]*?-->([\\s\\S]*?)<!-- COMPACT_SUMMARY_END -->/);
+         if (match) {
+           console.log('## ACTIVE COMMAND: /agileflow:${ACTIVE_COMMAND}');
+           console.log('');
+           console.log(match[1].trim());
+         }
+       " 2>/dev/null || echo "")
+
+       if [ -n "$SUMMARY" ]; then
+         COMMAND_SUMMARIES="${COMMAND_SUMMARIES}
+
+ ${SUMMARY}"
+       fi
+     fi
+   done
+ fi
+
+ # Output context
+ cat << EOF
+ AGILEFLOW PROJECT CONTEXT (preserve during compact):
+
+ ## Project Status
+ - Project: AgileFlow v${VERSION}
+ - Branch: ${BRANCH}
+ - Active Stories: ${CURRENT_STORY}
+ - WIP Count: ${WIP_COUNT}
+
+ ## Key Files to Check After Compact
+ - CLAUDE.md - Project system prompt with conventions
+ - README.md - Project overview and setup
+ - docs/09-agents/status.json - Story statuses and assignments
+ - docs/02-practices/ - Codebase practices (${PRACTICES:-check folder})
+
+ ## Active Epics
+ ${EPICS:-Check docs/05-epics/ for epic files}
+
+ ## Key Conventions (from CLAUDE.md)
+ $(grep -A 15 "## Key\|## Critical\|## Important\|CRITICAL:" CLAUDE.md 2>/dev/null | head -20 || echo "- Read CLAUDE.md for project conventions")
+
+ ## Recent Agent Activity
+ $(tail -3 docs/09-agents/bus/log.jsonl 2>/dev/null || echo "")
+ EOF
+
+ # Output active command summaries
+ if [ -n "$COMMAND_SUMMARIES" ]; then
+   echo "$COMMAND_SUMMARIES"
+ fi
+
+ cat << EOF
+
+ ## Post-Compact Actions
+ 1. Re-read CLAUDE.md if unsure about conventions
+ 2. Check status.json for current story state
+ 3. Review docs/02-practices/ for implementation patterns
+ 4. Check git log for recent changes
+ EOF
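
A quick way to sanity-check the hook is to run it from a repository root with a stub status file. A minimal sketch, assuming the script above is saved as `precompact.sh` (the diff does not show its destination path):

```bash
# Hypothetical smoke test for the PreCompact hook above.
# Assumes the script is saved as precompact.sh; the diff omits the real path.
mkdir -p docs/09-agents
cat > docs/09-agents/status.json << 'EOF'
{
  "stories": {
    "US-0042": { "title": "Add import mode", "status": "in_progress" },
    "US-0040": { "title": "Fix auth bug", "status": "done" }
  }
}
EOF
bash precompact.sh
# Expected in the output: "Active Stories: US-0042: Add import mode"
# and "WIP Count: 1"; version and branch fall back to "unknown" when
# there is no package.json or git checkout.
```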
@@ -0,0 +1,259 @@
+ #!/bin/bash
+ #
+ # validate-expertise.sh - Validate Agent Expert expertise.yaml files
+ #
+ # Purpose: Ensure expertise files remain accurate and useful over time
+ #
+ # Checks performed:
+ #   1. Schema validation (required fields: domain, last_updated, version)
+ #   2. Staleness check (last_updated > 30 days old)
+ #   3. File size check (warn if > 200 lines)
+ #   4. Learnings check (warn if empty - never self-improved)
+ #
+ # Usage:
+ #   ./scripts/validate-expertise.sh            # Validate all expertise files
+ #   ./scripts/validate-expertise.sh database   # Validate specific domain
+ #   ./scripts/validate-expertise.sh --help     # Show help
+ #
+ # Exit codes:
+ #   0 - All checks passed
+ #   1 - One or more checks failed (warnings don't cause failure)
+ #
+
+ set -e
+
+ # Colors
+ RED='\033[0;31m'
+ YELLOW='\033[1;33m'
+ GREEN='\033[0;32m'
+ BLUE='\033[0;34m'
+ NC='\033[0m' # No Color
+
+ # Configuration
+ EXPERTS_DIR="packages/cli/src/core/experts"
+ STALE_THRESHOLD_DAYS=30
+ MAX_LINES=200
+
+ # Counters
+ TOTAL=0
+ PASSED=0
+ WARNINGS=0
+ FAILED=0
+
+ # Help message
+ show_help() {
+   echo "Usage: $0 [domain]"
+   echo ""
+   echo "Validate Agent Expert expertise.yaml files"
+   echo ""
+   echo "Options:"
+   echo "  domain    Validate only the specified domain (e.g., 'database', 'testing')"
+   echo "  --help    Show this help message"
+   echo ""
+   echo "Checks performed:"
+   echo "  - Schema validation (domain, last_updated, version fields)"
+   echo "  - Staleness check (last_updated > $STALE_THRESHOLD_DAYS days)"
+   echo "  - File size check (> $MAX_LINES lines)"
+   echo "  - Learnings check (empty learnings array)"
+   echo ""
+   echo "Examples:"
+   echo "  $0            # Validate all expertise files"
+   echo "  $0 database   # Validate only database domain"
+   echo "  $0 --help     # Show this help"
+ }
+
+ # Check if yq is available (for better YAML parsing)
+ has_yq() {
+   command -v yq &> /dev/null
+ }
+
+ # Extract YAML field using grep (fallback if yq not available)
+ get_yaml_field() {
+   local file="$1"
+   local field="$2"
+   grep "^${field}:" "$file" 2>/dev/null | sed "s/^${field}:[[:space:]]*//" | tr -d '"' || echo ""
+ }
+
+ # Check if learnings is empty
+ learnings_empty() {
+   local file="$1"
+   # Check for "learnings: []" or learnings with only comments
+   if grep -q "^learnings: \[\]" "$file" 2>/dev/null; then
+     return 0 # Empty
+   fi
+   # Check if there's actual content after learnings:
+   local after_learnings
+   after_learnings=$(sed -n '/^learnings:/,/^[a-z]/p' "$file" | grep -v "^#" | grep -v "^learnings:" | grep -v "^$" | head -1)
+   if [ -z "$after_learnings" ]; then
+     return 0 # Empty
+   fi
+   return 1 # Has content
+ }
+
+ # Get file line count
+ get_line_count() {
+   wc -l < "$1" | tr -d ' '
+ }
+
+ # Calculate days since date
+ days_since() {
+   local date_str="$1"
+   local date_epoch
+   local now_epoch
+
+   # Handle different date formats (GNU date, then BSD date)
+   if [[ "$date_str" =~ ^[0-9]{4}-[0-9]{2}-[0-9]{2}$ ]]; then
+     date_epoch=$(date -d "$date_str" +%s 2>/dev/null || date -j -f "%Y-%m-%d" "$date_str" +%s 2>/dev/null || echo "")
+   fi
+   if [ -z "$date_epoch" ]; then
+     echo "999"; return # Invalid or unparseable date
+   fi
+
+   now_epoch=$(date +%s)
+   echo $(( (now_epoch - date_epoch) / 86400 ))
+ }
+
+ # Validate a single expertise file
+ validate_expertise() {
+   local domain="$1"
+   local file="$EXPERTS_DIR/$domain/expertise.yaml"
+   local status="PASS"
+   local issues=()
+
+   TOTAL=$((TOTAL + 1))
+
+   # Check file exists
+   if [ ! -f "$file" ]; then
+     echo -e "${RED}FAIL${NC} $domain - File not found: $file"
+     FAILED=$((FAILED + 1))
+     return 1
+   fi
+
+   # Schema validation
+   local domain_field version last_updated
+   domain_field=$(get_yaml_field "$file" "domain")
+   version=$(get_yaml_field "$file" "version")
+   last_updated=$(get_yaml_field "$file" "last_updated")
+
+   if [ -z "$domain_field" ]; then
+     issues+=("missing 'domain' field")
+     status="FAIL"
+   fi
+
+   if [ -z "$version" ]; then
+     issues+=("missing 'version' field")
+     status="FAIL"
+   fi
+
+   if [ -z "$last_updated" ]; then
+     issues+=("missing 'last_updated' field")
+     status="FAIL"
+   fi
+
+   # Staleness check
+   if [ -n "$last_updated" ]; then
+     local days_old
+     days_old=$(days_since "$last_updated")
+     if [ "$days_old" -gt "$STALE_THRESHOLD_DAYS" ]; then
+       issues+=("stale (${days_old} days since update)")
+       if [ "$status" = "PASS" ]; then
+         status="WARN"
+       fi
+     fi
+   fi
+
+   # File size check
+   local line_count
+   line_count=$(get_line_count "$file")
+   if [ "$line_count" -gt "$MAX_LINES" ]; then
+     issues+=("large file (${line_count} lines > ${MAX_LINES})")
+     if [ "$status" = "PASS" ]; then
+       status="WARN"
+     fi
+   fi
+
+   # Learnings check
+   if learnings_empty "$file"; then
+     issues+=("no learnings recorded (never self-improved)")
+     if [ "$status" = "PASS" ]; then
+       status="WARN"
+     fi
+   fi
+
+   # Output result
+   case "$status" in
+     PASS)
+       echo -e "${GREEN}PASS${NC} $domain"
+       PASSED=$((PASSED + 1))
+       ;;
+     WARN)
+       echo -e "${YELLOW}WARN${NC} $domain - ${issues[*]}"
+       WARNINGS=$((WARNINGS + 1))
+       ;;
+     FAIL)
+       echo -e "${RED}FAIL${NC} $domain - ${issues[*]}"
+       FAILED=$((FAILED + 1))
+       ;;
+   esac
+ }
+
+ # Main
+ main() {
+   # Handle help
+   if [ "$1" = "--help" ] || [ "$1" = "-h" ]; then
+     show_help
+     exit 0
+   fi
+
+   # Find script directory and change to repo root
+   local script_dir
+   script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+   cd "$script_dir/.."
+
+   # Check experts directory exists
+   if [ ! -d "$EXPERTS_DIR" ]; then
+     echo -e "${RED}Error:${NC} Experts directory not found: $EXPERTS_DIR"
+     echo "Are you running this from the repository root?"
+     exit 1
+   fi
+
+   echo -e "${BLUE}Validating Agent Expert Files${NC}"
+   echo "================================"
+   echo ""
+
+   # Validate specific domain or all
+   if [ -n "$1" ]; then
+     # Single domain
+     if [ ! -d "$EXPERTS_DIR/$1" ]; then
+       echo -e "${RED}Error:${NC} Domain not found: $1"
+       echo "Available domains:"
+       ls -1 "$EXPERTS_DIR" | grep -v templates | grep -v README
+       exit 1
+     fi
+     validate_expertise "$1" || true # a failed check must not abort under set -e
+   else
+     # All domains
+     for dir in "$EXPERTS_DIR"/*/; do
+       local domain
+       domain=$(basename "$dir")
+       # Skip templates directory
+       if [ "$domain" = "templates" ]; then
+         continue
+       fi
+       validate_expertise "$domain" || true # keep iterating under set -e
+     done
+   fi
+
+   # Summary
+   echo ""
+   echo "================================"
+   echo -e "Total: $TOTAL | ${GREEN}Passed: $PASSED${NC} | ${YELLOW}Warnings: $WARNINGS${NC} | ${RED}Failed: $FAILED${NC}"
+
+   # Exit code
+   if [ "$FAILED" -gt 0 ]; then
+     exit 1
+   fi
+   exit 0
+ }
+
+ main "$@"
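
To see the validator's checks fire, one can drop a minimal fixture into a domain folder and run the script from the repository root. A sketch, assuming a hypothetical `database` domain (the grep-based parser only looks for top-level `domain:`, `version:`, and `last_updated:` keys):

```bash
# Hypothetical fixture; the stale date and empty learnings array are
# chosen deliberately so both warning checks trigger.
mkdir -p packages/cli/src/core/experts/database
cat > packages/cli/src/core/experts/database/expertise.yaml << 'EOF'
domain: database
version: 1
last_updated: 2024-01-15
learnings: []
EOF
./scripts/validate-expertise.sh database
# Expected: WARN database - stale (N days since update) no learnings
# recorded (never self-improved). Warnings alone still exit 0.
```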
@@ -1,6 +1,6 @@
  ---
  description: Generate context export for web AI tools
- argument-hint: [MODE=full|export|note|research] [NOTE=<text>] [TOPIC=<text>] [DETAILS=<text>]
+ argument-hint: [MODE=full|export|note|research|import] [NOTE=<text>] [TOPIC=<text>] [DETAILS=<text>] [CONTENT=<text>] [SOURCE=<url>]
  ---

  <!-- COMPACT_SUMMARY_START
@@ -12,11 +12,12 @@ This section is extracted by the PreCompact hook to preserve essential context a
  Web AI Context Manager - Generates/exports/manages project context briefs for web AI tools (ChatGPT, Perplexity, Gemini, Claude web).

  ### Critical Behavioral Rules
- - **ALWAYS create TodoWrite list** for MODE=full and MODE=research to track multi-step workflows
+ - **ALWAYS create TodoWrite list** for MODE=full, MODE=research, and MODE=import to track multi-step workflows
  - **Diff-first approach**: Show changes and wait for YES/NO confirmation before ANY file writes
  - **Preserve user-written content**: Only update managed sections in docs/context.md
  - **No writes in export mode**: MODE=export outputs text only, never writes files
  - **Research is two-step**: STEP 1 generates prompt, STEP 2 stores results when user returns
+ - **Import is one-step**: Process CONTENT immediately and create research file
  - **Link research files**: Always reference research from ADRs/Epics/Stories that use it

  ### Core Workflow
@@ -50,6 +51,16 @@ Web AI Context Manager - Generates/exports/manages project context briefs for we
  7. Ask if user wants ADR/Epic/Story created from research
  8. Add research reference to created ADR/Epic/Story

+ **MODE=import**
+ 1. Create todo list tracking: validate inputs, process content, extract key points, extract code, generate actions, suggest stories, format research file, save, update index
+ 2. Validate TOPIC and CONTENT are provided
+ 3. Process raw content (transcript, article, etc.) by: summarizing key points, extracting code snippets, generating action items, suggesting user stories
+ 4. Format into structured research file with all extracted sections
+ 5. Save to docs/10-research/YYYYMMDD-topic-slug.md
+ 6. Update docs/10-research/README.md index
+ 7. Ask if user wants ADR/Epic/Story created from imported content
+ 8. Add research reference to created ADR/Epic/Story
+
  ### Key Files
  - docs/context.md - Main context brief (managed sections + user content)
  - docs/10-research/YYYYMMDD-topic-slug.md - Research results storage
@@ -68,9 +79,11 @@ Generate, export, or manage the web AI context brief.
  ROLE: Web AI Context Manager

  INPUTS (optional)
- - MODE=full|export|note|research (default: full)
+ - MODE=full|export|note|research|import (default: full)
  - NOTE=<text> (required if MODE=note)
- - TOPIC=<text> (required if MODE=research)
+ - TOPIC=<text> (required if MODE=research or MODE=import)
+ - CONTENT=<text> (required if MODE=import - raw content to process)
+ - SOURCE=<url> (optional for MODE=import - original source URL)

  ---

@@ -249,6 +262,124 @@ When user pastes research results back:

  ---

+ ## MODE=import
+ Import raw content (transcripts, articles, notes) and convert it into a structured research file.
+
+ ### Input
+ - TOPIC=<text> (required - name for the research file)
+ - CONTENT=<text> (required - raw content to process: transcript, article, notes, etc.)
+ - SOURCE=<url> (optional - original source URL for reference)
+
+ ### Use Cases
+ - YouTube video transcripts
+ - Conference talk notes
+ - Podcast transcripts
+ - Blog posts / articles
+ - Documentation pages
+ - Forum discussions / Stack Overflow threads
+ - Meeting notes
+
+ ### TODO LIST TRACKING
+ **CRITICAL**: Immediately create a todo list using the TodoWrite tool to track the import workflow:
+ ```
+ 1. Validate TOPIC and CONTENT are provided
+ 2. Analyze and summarize key points from content
+ 3. Extract any code snippets
+ 4. Generate action items based on content
+ 5. Create user story suggestions (if applicable)
+ 6. Format into structured research markdown
+ 7. Show diff for review
+ 8. Save to docs/10-research/YYYYMMDD-topic-slug.md
+ 9. Update docs/10-research/README.md index
+ 10. Ask about creating ADR/Epic/Story from research
+ ```
+
+ Mark each step complete as you finish it.
+
+ ### Processing Steps
+
+ 1. **Validate Inputs**
+    - Verify TOPIC is provided (error if missing)
+    - Verify CONTENT is provided (error if missing)
+    - SOURCE is optional but recommended for attribution
+
+ 2. **Analyze Content**
+    Extract from the raw content:
+    - **Summary**: 2-3 paragraph TL;DR of the main points
+    - **Key Findings**: Bullet list of important takeaways
+    - **Code Snippets**: Any code blocks, commands, or configuration (preserve exactly)
+    - **Action Items**: Concrete next steps mentioned or implied
+    - **Story Suggestions**: Potential user stories/epics based on content
+
+ 3. **Format Research File**
+    ````markdown
+    # [Topic Title]
+
+    **Import Date**: YYYY-MM-DD
+    **Topic**: [original topic]
+    **Source**: [URL if provided, or "Direct import"]
+    **Content Type**: [transcript/article/notes/etc.]
+
+    ## Summary
+    [2-3 paragraph executive summary of the content]
+
+    ## Key Findings
+    - [Main point 1 with details]
+    - [Main point 2 with details]
+    - [Main point 3 with details]
+    - ...
+
+    ## Code Snippets
+    [Preserve all code snippets exactly as they appeared]
+    ```language
+    [code here]
+    ```
+
+    ## Action Items
+    - [ ] [Action 1 - concrete next step]
+    - [ ] [Action 2 - concrete next step]
+    - [ ] [Action 3 - concrete next step]
+
+    ## Story Suggestions
+    [If content suggests feature work, list potential stories]
+
+    ### Potential Epic: [Epic Title]
+    - **US-XXXX**: [Story 1 title]
+      - AC: [acceptance criteria bullet]
+    - **US-XXXX**: [Story 2 title]
+      - AC: [acceptance criteria bullet]
+
+    ## Raw Content Reference
+    <details>
+    <summary>Original content (click to expand)</summary>
+
+    [First 500 chars of original content for reference...]
+    </details>
+
+    ## References
+    - Source: [URL or "Direct import"]
+    - Import date: [YYYY-MM-DD]
+    ````
+
+ 4. **Save and Index**
+    - Save to `docs/10-research/YYYYMMDD-<topic-slug>.md`
+    - Update `docs/10-research/README.md` with new entry
+
+ 5. **Offer Next Steps**
+    Ask the user via AskUserQuestion:
+    - Create an ADR referencing this research?
+    - Create an Epic/Stories based on the story suggestions?
+    - Link this research to an existing Epic/Story?
+
+ ### Rules
+ - Diff-first; YES/NO before writing the research file
+ - Preserve ALL code snippets exactly as provided
+ - Generate actionable items (not vague suggestions)
+ - Keep the raw content reference collapsed to save space
+ - Always update the research index
+
+ ---

  ## Usage Examples

  ```bash
@@ -262,9 +393,13 @@ When user pastes research results back:
  # Add a quick note
  /agileflow:context MODE=note NOTE="User reported auth bug in production"

- # Build research prompt
+ # Build research prompt for web AI
  /agileflow:context MODE=research TOPIC="Implement OAuth 2.0 with Google"
  /agileflow:context MODE=research TOPIC="Add Stripe payments" DETAILS="Launch by end of sprint"
+
+ # Import external content (transcripts, articles, notes)
+ /agileflow:context MODE=import TOPIC="React Server Components" CONTENT="[paste transcript here]"
+ /agileflow:context MODE=import TOPIC="Stripe Webhooks Tutorial" SOURCE="https://youtube.com/..." CONTENT="[paste transcript here]"
  ```

  ---
@@ -276,3 +411,4 @@ Depending on MODE:
  - **export**: Text output ready to paste into web AI tool
  - **note**: Appended note to docs/context.md (after YES confirmation)
  - **research**: Research prompt in code block ready to paste into web AI tool
+ - **import**: Processed research file saved to docs/10-research/ (after YES confirmation)
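
The `COMPACT_SUMMARY_START`/`COMPACT_SUMMARY_END` markers at the top of this command file are exactly what the PreCompact hook's regex slices out, so only the summary block is re-emitted after compaction. A minimal sketch of that extraction, using a hypothetical throwaway file rather than the real command:

```bash
# Hypothetical demonstration of the marker extraction performed by the hook.
cat > /tmp/context.md << 'EOF'
<!-- COMPACT_SUMMARY_START
This section is extracted by the PreCompact hook. -->
Web AI Context Manager - compact summary body.
<!-- COMPACT_SUMMARY_END -->
Full command prompt follows; the hook re-emits only the summary block.
EOF
node -e "
const fs = require('fs');
const content = fs.readFileSync('/tmp/context.md', 'utf8');
const match = content.match(/<!-- COMPACT_SUMMARY_START[\s\S]*?-->([\s\S]*?)<!-- COMPACT_SUMMARY_END -->/);
if (match) console.log(match[1].trim());
"
# Prints only: Web AI Context Manager - compact summary body.
```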