@iservu-inc/adf-cli 0.14.6 → 0.17.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37) hide show
  1. package/.project/chats/current/SESSION-STATUS.md +25 -279
  2. package/.project/docs/PHASE-6-ADVANCED-LEARNING.md +46 -0
  3. package/.project/docs/ROADMAP.md +72 -157
  4. package/.project/docs/designs/CUSTOM-ARTIFACT-UPLOAD.md +259 -0
  5. package/.project/docs/designs/LEARNING-RULES-EXCHANGE.md +77 -0
  6. package/CHANGELOG.md +2054 -2995
  7. package/README.md +4 -7
  8. package/bin/adf.js +80 -0
  9. package/conductor/tracks/session_resume_review_20260113/plan.md +18 -0
  10. package/conductor/tracks.md +4 -0
  11. package/gemini.md +3 -0
  12. package/lib/ai/ai-client.js +9 -9
  13. package/lib/commands/deploy.js +14 -0
  14. package/lib/commands/guide.js +32 -0
  15. package/lib/commands/import.js +439 -0
  16. package/lib/commands/init.js +17 -4
  17. package/lib/frameworks/interviewer.js +277 -85
  18. package/lib/frameworks/progress-tracker.js +18 -0
  19. package/lib/generators/a2a-generator.js +289 -0
  20. package/lib/generators/index.js +11 -0
  21. package/lib/learning/learning-manager.js +107 -8
  22. package/lib/learning/rules-exporter.js +103 -0
  23. package/lib/learning/rules-importer.js +141 -0
  24. package/lib/templates/shared/agents/analyst.md +1 -1
  25. package/lib/templates/shared/agents/architect.md +1 -1
  26. package/lib/templates/shared/agents/dev.md +1 -1
  27. package/lib/templates/shared/agents/pm.md +2 -2
  28. package/lib/templates/shared/agents/qa.md +1 -1
  29. package/lib/templates/shared/agents/sm.md +3 -3
  30. package/lib/templates/shared/memory/constitution.md +2 -2
  31. package/lib/templates/shared/templates/README.md +14 -14
  32. package/lib/templates/shared/templates/prd-template.md +1 -1
  33. package/lib/utils/artifact-detector.js +253 -0
  34. package/lib/utils/tool-feature-registry.js +6 -0
  35. package/package.json +1 -1
  36. package/tests/a2a-generator.test.js +288 -0
  37. package/tests/progress-tracker.test.js +16 -0
package/README.md CHANGED
@@ -186,13 +186,10 @@ ANTHROPIC_API_KEY=sk-ant-api03-...
186
186
 
187
187
  The AI provider enhances your workflow by:
188
188
 
189
- - **Smart Question Filtering** - Automatically analyzes your project and skips irrelevant questions (e.g., UI questions for CLI tools)
190
- - **Learning System** - Learns from your skip behavior to improve filtering over time:
191
- - Automatically detects patterns in skipped questions across sessions
192
- - Generates learned rules from high-confidence patterns (75%+)
193
- - Applies your preferences in future interviews with your approval
194
- - Privacy-first design: all data stored locally in `.adf/learning/`
195
- - Manage via `adf config` → Learning System
189
+ - **Smart Filtering** - AI-powered question filtering based on context
190
+ - **Learning System** - Adapts to your preferences over time with pattern decay
191
+ - **Analytics Dashboard** - Comprehensive insights into time saved and learning health
192
+ - **Session Resume** - Pause and resume interviews anytime
196
193
  - **Real-Time Answer Quality Analysis** - Scores your answers 0-100
197
194
  - **Intelligent Follow-Up Questions** - Automatically generated based on your responses
198
195
  - **Context-Aware Guidance** - Tailored suggestions for your project type
package/bin/adf.js CHANGED
@@ -22,6 +22,7 @@ const updateCommand = require('../lib/commands/update');
22
22
  const configCommand = require('../lib/commands/config');
23
23
  const guideCommand = require('../lib/commands/guide');
24
24
  const toolsCommand = require('../lib/commands/tools');
25
+ const importCommand = require('../lib/commands/import');
25
26
 
26
27
  const program = new Command();
27
28
 
@@ -71,6 +72,9 @@ ${chalk.cyan.bold('Getting Help:')}
71
72
  $ adf update --help
72
73
  $ adf tools audit --help
73
74
 
75
+ ${chalk.gray('# Import existing documentation')}
76
+ $ adf import ./docs/PRD.md
77
+
74
78
  ${chalk.gray('# Get tool-specific setup guides')}
75
79
  $ adf guide windsurf
76
80
  $ adf guide claude-code
@@ -146,6 +150,15 @@ ${chalk.cyan.bold('Examples:')}
146
150
  ${chalk.gray('# Comprehensive workflow for enterprise project')}
147
151
  $ adf init --comprehensive
148
152
 
153
+ ${chalk.cyan.bold('Existing Projects:')}
154
+ Running ${chalk.white('adf init')} in a directory with existing content:
155
+ • ${chalk.yellow('Synthesize & Augment')} - Detects other frameworks (BMAD/OpenSpec)
156
+ and merges them into a unified ADF session.
157
+ • ${chalk.yellow('Resume Session')} - Detects interrupted interviews and lets you
158
+ continue exactly where you left off.
159
+ • ${chalk.yellow('Continue Project')} - Start a new session while preserving
160
+ existing project history and learning data.
161
+
149
162
  ${chalk.cyan.bold('Features:')}
150
163
  • AI-powered question filtering based on project context
151
164
  • Answer quality analysis and suggestions
@@ -394,6 +407,73 @@ ${chalk.cyan.bold('What\'s in a Guide:')}
394
407
  `)
395
408
  .action(guideCommand);
396
409
 
410
+ // adf import
411
+ program
412
+ .command('import <files...>')
413
+ .description('Import existing markdown documentation into ADF')
414
+ .option('-t, --type <type>', 'Artifact type (prd, architecture, stories, tasks, spec, constitution, plan, prp, custom)')
415
+ .option('-n, --name <name>', 'Custom artifact name (used with --type custom)')
416
+ .option('-s, --session <id>', 'Merge into existing session (use "latest" for most recent)')
417
+ .option('-i, --interactive', 'Prompt for type of each file')
418
+ .option('--no-normalize', 'Skip content normalization')
419
+ .option('--dry-run', 'Show what would be imported without creating session')
420
+ .addHelpText('after', `
421
+ ${chalk.cyan.bold('Description:')}
422
+ Import existing markdown documentation (PRDs, design docs, specifications)
423
+ into ADF for deployment to AI coding assistants. Useful for projects that
424
+ already have formal artifacts from other tools or processes.
425
+
426
+ ${chalk.cyan.bold('Supported Artifact Types:')}
427
+ ${chalk.yellow('prd')} - Product Requirements Document
428
+ ${chalk.yellow('architecture')} - System/Technical Architecture
429
+ ${chalk.yellow('stories')} - User Stories / Backlog
430
+ ${chalk.yellow('tasks')} - Implementation Tasks
431
+ ${chalk.yellow('specification')} - Feature Specification (alias: spec)
432
+ ${chalk.yellow('constitution')} - Quality Standards/Guidelines
433
+ ${chalk.yellow('plan')} - Technical/Project Plan
434
+ ${chalk.yellow('prp')} - Agent-Native PRP Document
435
+ ${chalk.yellow('custom')} - Custom artifact (requires --name)
436
+
437
+ ${chalk.cyan.bold('Auto-Detection:')}
438
+ ADF automatically detects artifact types based on:
439
+ - Filename patterns (e.g., "PRD.md" -> prd)
440
+ - Frontmatter metadata (type: prd)
441
+ - Content analysis (headers, keywords)
442
+
443
+ ${chalk.cyan.bold('Examples:')}
444
+ ${chalk.gray('# Import single file (auto-detect type)')}
445
+ $ adf import ./docs/PRD.md
446
+
447
+ ${chalk.gray('# Import multiple files')}
448
+ $ adf import ./docs/PRD.md ./docs/architecture.md
449
+
450
+ ${chalk.gray('# Import entire directory')}
451
+ $ adf import ./docs/
452
+
453
+ ${chalk.gray('# Specify type explicitly')}
454
+ $ adf import ./design.md --type architecture
455
+
456
+ ${chalk.gray('# Import custom artifact')}
457
+ $ adf import ./compliance.md --type custom --name "Compliance Requirements"
458
+
459
+ ${chalk.gray('# Interactive mode (choose type for each file)')}
460
+ $ adf import ./docs/*.md --interactive
461
+
462
+ ${chalk.gray('# Merge into existing session')}
463
+ $ adf import ./extra-docs.md --session latest
464
+
465
+ ${chalk.gray('# Preview what would be imported')}
466
+ $ adf import ./docs/*.md --dry-run
467
+
468
+ ${chalk.cyan.bold('Output:')}
469
+ Creates a session at: ${chalk.white('.adf/sessions/{timestamp}_imported/')}
470
+
471
+ ${chalk.cyan.bold('After Import:')}
472
+ Run ${chalk.white('adf deploy <tool>')} to deploy imported artifacts
473
+ to your AI coding assistant (Windsurf, Cursor, etc.)
474
+ `)
475
+ .action(importCommand);
476
+
397
477
  // adf config
398
478
  program
399
479
  .command('config')
@@ -0,0 +1,18 @@
1
+ # Track: Session Resume Context Review
2
+
3
+ ## Objective
4
+ Implement a feature to allow users to review previous Question/Answer pairs when resuming an ADF session. The review interface should support pagination for large histories and provide options to continue, skip, or exit the review process at any time.
5
+
6
+ ## Tasks
7
+ - [x] Implement `reviewProgress()` method in `Interviewer` class with pagination support.
8
+ - [x] Integrate `reviewProgress()` into `start()` method for resume flow.
9
+ - [x] Handle 'back' signal in `init` command to allow returning to main menu.
10
+ - [x] Verify implementation with tests (implicit verification via `npm test` passing existing tests and manual flow logic check).
11
+ - [x] Add 'Processing reply...' spinner to Interviewer to indicate background AI activity.
12
+ - [x] Fix Google Gemini follow-up truncation by optimizing prompt and lowering temperature.
13
+ - [x] Add minimum delay to spinner to prevent 'flicker' and improve UX.
14
+ - [x] Implement answer editing functionality during context review.
15
+ - [x] Add `updateAnswer` method to `ProgressTracker` with unit tests.
16
+
17
+ ## Context
18
+ Users resuming a session often need context on what they answered previously. Dumping all answers at once can overwhelm the terminal. A paginated review process improves UX and allows users to re-orient themselves before continuing.
@@ -7,3 +7,7 @@ This file tracks all major tracks for the project. Each track has its own detail
7
7
  ## [x] Track: Bootstrap Agent-Native Documentation
8
8
  *Link: [./conductor/tracks/bootstrap_agents_20260111/](./conductor/tracks/bootstrap_agents_20260111/)*
9
9
 
10
+ ## [x] Track: Session Resume Context Review
11
+ *Link: [./conductor/tracks/session_resume_review_20260113/](./conductor/tracks/session_resume_review_20260113/)*
12
+
13
+
package/gemini.md ADDED
@@ -0,0 +1,3 @@
1
+ # Gemini Global Rules
2
+
3
+ - Do not execute multiple tasks at once; stay focused, use context, and work through each task one at a time from beginning (problem definition or task goal) to conclusion (meet the goal through iterative unit-test or code validation, ensuring an accurate representation of successful completion). It is imperative that you perform better today than you did yesterday by paying closer attention to detail and ensuring that any confirmation of completion you offer has the highest probability of being 100% true (to the best of your ability).
@@ -403,21 +403,21 @@ Respond with ONLY a JSON object in this exact format:
403
403
  * Generate follow-up question based on answer quality
404
404
  */
405
405
  async generateFollowUp(originalQuestion, answer, issues) {
406
- const prompt = `You are an expert software requirements analyst. Based on the user's answer, generate a helpful follow-up question.
406
+ const prompt = `You are a helpful software requirements analyst.
407
+ The user provided an answer that is a bit vague.
408
+ Your task is to ask a polite follow-up question to get more specific details.
407
409
 
408
410
  Original Question: ${originalQuestion}
409
-
410
411
  User's Answer: ${answer}
412
+ Missing Information: ${issues.join(', ')}
411
413
 
412
- Issues Identified: ${issues.join(', ')}
413
-
414
- Generate ONE concise follow-up question (max 20 words) to help the user provide more specific information. Focus on the most critical missing element.
415
-
416
- Respond with ONLY the follow-up question, no explanation.`;
414
+ Please generate one single follow-up question.
415
+ Example: "Could you specify which database you plan to use?"
416
+ Question:`;
417
417
 
418
418
  const response = await this.sendMessage(prompt, {
419
- maxTokens: 100,
420
- temperature: 0.7
419
+ maxTokens: 300,
420
+ temperature: 0.4
421
421
  });
422
422
 
423
423
  return response.content.trim().replace(/^["']|["']$/g, ''); // Remove quotes if present
@@ -159,6 +159,20 @@ async function deployToTool(tool, options = {}) {
159
159
  console.warn(chalk.yellow(`\n⚠️ Warning: Could not generate AGENTS.md: ${error.message}`));
160
160
  }
161
161
 
162
+ // Generate A2A agent cards (if not already present)
163
+ const a2aDir = path.join(cwd, '.a2a');
164
+ if (!await fs.pathExists(a2aDir)) {
165
+ try {
166
+ const { generateA2A } = require('../generators');
167
+ await generateA2A(sessionPath, cwd, framework);
168
+ if (!options.silent && !spinner) {
169
+ console.log(chalk.green('✓ Generated A2A agent cards'));
170
+ }
171
+ } catch (error) {
172
+ console.warn(chalk.yellow(`⚠ Could not generate A2A cards: ${error.message}`));
173
+ }
174
+ }
175
+
162
176
  // Generate tool-specific configurations
163
177
  if (spinner) spinner.text = `Generating ${TOOLS[tool]?.name || tool} configurations...`;
164
178
 
@@ -295,6 +295,38 @@ const TOOL_GUIDES = {
295
295
  ]
296
296
  },
297
297
 
298
+ 'a2a': {
299
+ name: 'A2A (Agent-to-Agent) Protocol',
300
+ files: [
301
+ { path: '.a2a/agent-card.json', desc: 'Combined discovery card with all agent skills' },
302
+ { path: '.a2a/agents/*.json', desc: 'Individual agent cards (dev, qa, pm, etc.)' },
303
+ { path: 'AGENTS.md', desc: 'Universal agent manifest' }
304
+ ],
305
+ setup: [
306
+ '1. A2A cards are auto-generated during `adf init`',
307
+ '2. Cards are also created as fallback during `adf deploy <tool>`',
308
+ '3. No additional setup required — cards follow the A2A protocol spec',
309
+ '4. Individual agent cards are in .a2a/agents/',
310
+ '5. Combined discovery card at .a2a/agent-card.json'
311
+ ],
312
+ usage: [
313
+ '• A2A cards enable interoperability with any A2A-compatible tool',
314
+ '• Combined card (.a2a/agent-card.json) lists all agents and skills',
315
+ '• Individual cards (.a2a/agents/<name>.json) per agent role',
316
+ '• Skills are derived from agent markdown frontmatter',
317
+ '• Cards include MCP tool references from agent definitions',
318
+ '• Protocol version: 0.3 (JSONRPC binding)',
319
+ '• Spec: https://google.github.io/A2A/'
320
+ ],
321
+ mcpServers: [],
322
+ troubleshooting: [
323
+ '• Cards not generated? Run `adf init` or `adf deploy <tool>`',
324
+ '• Missing agents? Check workflow level (rapid=2, balanced=4, comprehensive=6)',
325
+ '• Regenerate by deleting .a2a/ directory and running deploy again',
326
+ '• Validate JSON: cat .a2a/agent-card.json | python -m json.tool'
327
+ ]
328
+ },
329
+
298
330
  'deepagent': {
299
331
  name: 'Abacus.ai DeepAgent',
300
332
  files: [
@@ -0,0 +1,439 @@
1
+ const fs = require('fs-extra');
2
+ const path = require('path');
3
+ const chalk = require('chalk');
4
+ const ora = require('ora');
5
+ const inquirer = require('inquirer');
6
+ const { detectArtifactType, getArtifactTypes, isValidType } = require('../utils/artifact-detector');
7
+
8
+ /**
9
+ * Import Command
10
+ * Import existing markdown documentation into ADF
11
+ */
12
+
13
+ /**
14
+ * Generate a session ID for imported artifacts
15
+ * @returns {string} - Session ID in format: {timestamp}_imported
16
+ */
17
/**
 * Build the session ID used for imported artifacts.
 * Derived from the current UTC time; format: YYYYMMDD_HHMMSS_imported.
 * @returns {string} - Session ID in format: {timestamp}_imported
 */
function generateSessionId() {
  const isoStamp = new Date().toISOString();
  // Drop fractional seconds, strip date/time separators, and turn the
  // ISO 'T' divider into an underscore: 2026-01-13T09:15:30.123Z
  // becomes 20260113_091530.
  const compact = isoStamp
    .split('.')[0]
    .replace(/[-:]/g, '')
    .replace('T', '_');
  return `${compact}_imported`;
}
25
+
26
+ /**
27
+ * Normalize markdown content with ADF metadata
28
+ * @param {string} content - Original content
29
+ * @param {string} type - Artifact type
30
+ * @param {string} sourcePath - Original source path
31
+ * @returns {string} - Normalized content
32
+ */
33
/**
 * Normalize markdown content with ADF metadata.
 *
 * Injects ADF bookkeeping fields (type, import flag, source path, timestamp)
 * into the file's YAML frontmatter, creating a frontmatter block when the
 * file has none.
 *
 * @param {string} content - Original content
 * @param {string} type - Artifact type
 * @param {string} sourcePath - Original source path
 * @returns {string} - Normalized content
 */
function normalizeContent(content, type, sourcePath) {
  const adfMetadata = {
    adf_type: type,
    adf_imported: true,
    adf_source: sourcePath,
    adf_imported_at: new Date().toISOString()
  };
  const yaml = formatYamlMetadata(adfMetadata);

  // FIX: only treat the leading '---' as frontmatter when it is a real
  // frontmatter fence ('---' followed by optional whitespace and a newline).
  // The previous startsWith('---') check also matched a leading horizontal
  // rule such as '----', in which case the replace() below was a no-op and
  // the ADF metadata was silently dropped.
  if (/^---\s*\n/.test(content)) {
    // Insert ADF metadata at the top of the existing frontmatter block.
    return content.replace(/^---\s*\n/, `---\n${yaml}`);
  }

  // No frontmatter present: prepend a new block.
  const frontmatter = `---\n${yaml}---\n\n`;
  return frontmatter + content;
}
53
+
54
+ /**
55
+ * Format metadata as YAML
56
+ * @param {Object} metadata - Metadata object
57
+ * @returns {string} - YAML formatted string
58
+ */
59
/**
 * Format metadata as YAML.
 * Each value is JSON-encoded, which is valid YAML for the flat
 * string/boolean values used here.
 * @param {Object} metadata - Metadata object
 * @returns {string} - YAML formatted string (newline-terminated)
 */
function formatYamlMetadata(metadata) {
  const lines = [];
  for (const [key, value] of Object.entries(metadata)) {
    lines.push(`${key}: ${JSON.stringify(value)}`);
  }
  return `${lines.join('\n')}\n`;
}
64
+
65
+ /**
66
+ * Create session directory structure
67
+ * @param {string} projectPath - Project root
68
+ * @param {string} sessionId - Session ID
69
+ * @returns {Object} - Paths object
70
+ */
71
/**
 * Create session directory structure.
 * Layout: .adf/sessions/<id>/{outputs, outputs/custom, sources}
 * @param {string} projectPath - Project root
 * @param {string} sessionId - Session ID
 * @returns {Object} - Paths object ({ sessionPath, outputsPath, customPath, sourcesPath })
 */
async function createSessionStructure(projectPath, sessionId) {
  const sessionPath = path.join(projectPath, '.adf', 'sessions', sessionId);
  const outputsPath = path.join(sessionPath, 'outputs');
  const customPath = path.join(outputsPath, 'custom');
  const sourcesPath = path.join(sessionPath, 'sources');

  // ensureDir creates missing parents, so these three cover the whole tree.
  for (const dir of [outputsPath, customPath, sourcesPath]) {
    await fs.ensureDir(dir);
  }

  return { sessionPath, outputsPath, customPath, sourcesPath };
}
84
+
85
+ /**
86
+ * Create session metadata
87
+ * @param {string} sessionPath - Session directory path
88
+ * @param {Array} sourceFiles - List of source files
89
+ * @param {Array} artifactTypes - List of artifact types
90
+ * @param {string} projectPath - Project root path
91
+ */
92
/**
 * Create session metadata.
 * Writes `_metadata.json` (provenance of the import) and `_progress.json`
 * (a completed, non-resumable progress record) into the session directory.
 * @param {string} sessionPath - Session directory path
 * @param {Array} sourceFiles - List of source files (absolute paths)
 * @param {Array} artifactTypes - List of artifact types (one per file)
 * @param {string} projectPath - Project root path
 */
async function createSessionMetadata(sessionPath, sourceFiles, artifactTypes, projectPath) {
  const metadata = {
    framework: 'imported',
    importedAt: new Date().toISOString(),
    // Store paths relative to the project so the session stays portable.
    sourceFiles: sourceFiles.map((file) => path.relative(projectPath, file)),
    artifactTypes: [...new Set(artifactTypes)],
    projectPath
  };
  await fs.writeJson(path.join(sessionPath, '_metadata.json'), metadata, { spaces: 2 });

  const progress = {
    status: 'completed',
    canResume: false,
    importedArtifacts: sourceFiles.length,
    completedBlocks: artifactTypes,
    totalBlocks: artifactTypes.length,
    totalQuestionsAnswered: 0,
    lastUpdated: new Date().toISOString()
  };
  await fs.writeJson(path.join(sessionPath, '_progress.json'), progress, { spaces: 2 });
}
115
+
116
+ /**
117
+ * Resolve files from input arguments (handles globs and directories)
118
+ * @param {Array} inputs - File paths, directories, or glob patterns
119
+ * @returns {Promise<Array>} - Resolved file paths
120
+ */
121
/**
 * Resolve files from input arguments (handles globs and directories).
 * Directories are scanned (non-recursively) for markdown files; anything
 * that is neither a directory nor a markdown file is skipped with a warning.
 * @param {Array} inputs - File paths, directories, or glob patterns
 * @returns {Promise<Array>} - Resolved file paths
 */
async function resolveFiles(inputs) {
  const resolved = [];
  const isMarkdown = (name) => name.endsWith('.md') || name.endsWith('.markdown');

  for (const input of inputs) {
    const fullPath = path.resolve(input);

    if (!await fs.pathExists(fullPath)) {
      console.warn(chalk.yellow(`⚠️ File not found: ${input}`));
      continue;
    }

    const stats = await fs.stat(fullPath);

    if (stats.isDirectory()) {
      // Scan directory for markdown files (top level only).
      const entries = await fs.readdir(fullPath);
      for (const entry of entries) {
        if (isMarkdown(entry)) {
          resolved.push(path.join(fullPath, entry));
        }
      }
      continue;
    }

    if (stats.isFile() && isMarkdown(fullPath)) {
      resolved.push(fullPath);
    } else {
      console.warn(chalk.yellow(`⚠️ Skipping non-markdown file: ${input}`));
    }
  }

  return resolved;
}
151
+
152
+ /**
153
+ * Interactive type selection for a file
154
+ * @param {string} filePath - File path
155
+ * @param {Object} detection - Auto-detection result
156
+ * @returns {Promise<Object>} - Final type selection
157
+ */
158
/**
 * Interactive type selection for a file.
 * Shows the auto-detection result (when available), lets the user pick a
 * known artifact type, a custom type (with a name prompt), or skip the file.
 * @param {string} filePath - File path
 * @param {Object} detection - Auto-detection result ({ type, config, confidence })
 * @returns {Promise<Object|null>} - Final type selection, or null when skipped
 */
async function promptForType(filePath, detection) {
  const types = getArtifactTypes();
  const filename = path.basename(filePath);

  // FIX: interpolate the actual filename — the original emitted the broken
  // literal "$(unknown)" instead of `${filename}`.
  console.log(chalk.cyan(`\n📄 ${filename}`));

  if (detection.confidence > 0) {
    console.log(chalk.gray(` Auto-detected: ${detection.config.name} (${detection.confidence}% confidence)`));
  }

  const choices = [
    ...Object.entries(types).map(([key, config]) => ({
      name: `${config.name} → ${config.outputFile}`,
      value: key
    })),
    { name: chalk.yellow('Custom (specify name)'), value: 'custom' },
    { name: chalk.gray('Skip this file'), value: 'skip' }
  ];

  // Pre-select detected type if high confidence
  const defaultChoice = detection.confidence >= 60 ? detection.type : undefined;

  const { selectedType } = await inquirer.prompt([
    {
      type: 'list',
      name: 'selectedType',
      // FIX: same "$(unknown)" garbling as above — show the filename.
      message: `Select artifact type for ${filename}:`,
      choices,
      default: defaultChoice
    }
  ]);

  if (selectedType === 'skip') {
    return null;
  }

  if (selectedType === 'custom') {
    const { customName } = await inquirer.prompt([
      {
        type: 'input',
        name: 'customName',
        message: 'Enter custom artifact name:',
        default: filename.replace(/\.md$/, ''),
        validate: input => input.trim().length > 0 || 'Name cannot be empty'
      }
    ]);

    return {
      type: 'custom',
      customName: customName.trim(),
      outputFile: `custom/${customName.trim().toLowerCase().replace(/\s+/g, '-')}.md`
    };
  }

  return {
    type: selectedType,
    outputFile: types[selectedType].outputFile
  };
}
217
+
218
+ /**
219
+ * Main import function
220
+ * @param {Array} files - Files to import
221
+ * @param {Object} options - Command options
222
+ */
223
/**
 * Main import function.
 *
 * Resolves the input paths, determines an artifact type for each file
 * (interactively, via --type, or by auto-detection), then copies the
 * normalized artifacts into a new or existing ADF session.
 *
 * @param {Array} files - Files to import (paths, directories, shell-expanded globs)
 * @param {Object} options - Command options (type, name, session, interactive,
 *                           normalize, dryRun)
 * @throws {Error} Re-thrown when writing the session fails (after spinner.fail)
 */
async function importArtifacts(files, options = {}) {
  const cwd = process.cwd();
  const adfDir = path.join(cwd, '.adf');

  // Ensure .adf directory exists
  if (!await fs.pathExists(adfDir)) {
    console.log(chalk.yellow('\n⚠️ No .adf directory found. Creating one...'));
    await fs.ensureDir(adfDir);
  }

  // Resolve all input files
  const resolvedFiles = await resolveFiles(files);

  if (resolvedFiles.length === 0) {
    console.error(chalk.red('\n❌ No valid markdown files found to import.'));
    console.log(chalk.yellow('Specify .md files, directories, or use glob patterns.\n'));
    process.exit(1);
  }

  console.log(chalk.cyan.bold(`\n📥 Importing ${resolvedFiles.length} file(s)...\n`));

  // Dry run mode
  if (options.dryRun) {
    console.log(chalk.yellow.bold('DRY RUN - No changes will be made\n'));
  }

  // Process each file: decide its artifact type.
  const imports = [];
  const skipped = [];

  for (const filePath of resolvedFiles) {
    const detection = await detectArtifactType(filePath);
    let finalType;

    if (options.interactive) {
      // Interactive mode - prompt for each file
      finalType = await promptForType(filePath, detection);
      if (!finalType) {
        skipped.push(filePath);
        continue;
      }
    } else if (options.type) {
      // Explicit type provided via --type flag
      if (!isValidType(options.type)) {
        console.error(chalk.red(`\n❌ Invalid artifact type: ${options.type}`));
        console.log(chalk.yellow('Valid types: prd, architecture, stories, tasks, specification, constitution, plan, prp, custom\n'));
        process.exit(1);
      }

      if (options.type === 'custom') {
        const customName = options.name || path.basename(filePath, '.md');
        finalType = {
          type: 'custom',
          customName,
          outputFile: `custom/${customName.toLowerCase().replace(/\s+/g, '-')}.md`
        };
      } else {
        const types = getArtifactTypes();
        finalType = {
          type: options.type,
          outputFile: types[options.type].outputFile
        };
      }
    } else {
      // Auto-detection mode; warn (but proceed) on low confidence.
      if (detection.confidence < 50) {
        console.log(chalk.yellow(`⚠️ Low confidence (${detection.confidence}%) for ${path.basename(filePath)}`));
        console.log(chalk.gray(` Detected as: ${detection.config.name}`));
        console.log(chalk.gray(` Use --interactive or --type to specify manually\n`));
      }

      finalType = {
        type: detection.type,
        outputFile: detection.config.outputFile,
        confidence: detection.confidence
      };
    }

    imports.push({
      sourcePath: filePath,
      ...finalType
    });

    if (!options.dryRun) {
      console.log(chalk.green(`✓ ${path.basename(filePath)}`));
      console.log(chalk.gray(` → ${finalType.outputFile} (${finalType.type})`));
    }
  }

  if (options.dryRun) {
    console.log(chalk.cyan.bold('\nWould import:\n'));
    for (const item of imports) {
      console.log(chalk.green(` ${path.basename(item.sourcePath)}`));
      console.log(chalk.gray(` → ${item.outputFile} (${item.type})`));
    }
    if (skipped.length > 0) {
      console.log(chalk.yellow(`\nWould skip: ${skipped.length} file(s)\n`));
    }
    return;
  }

  if (imports.length === 0) {
    console.log(chalk.yellow('\nNo files to import after processing.\n'));
    return;
  }

  // Create session
  const spinner = ora('Creating import session...').start();

  try {
    // FIX: `--session latest` with no existing sessions made
    // findLatestSession() return null, which crashed path.join downstream.
    // Fall back to a fresh session ID in that case.
    let sessionId;
    if (options.session === 'latest') {
      sessionId = (await findLatestSession(cwd)) || generateSessionId();
    } else {
      sessionId = options.session || generateSessionId();
    }

    const { sessionPath, outputsPath, customPath, sourcesPath } = await createSessionStructure(cwd, sessionId);

    // Copy and normalize files
    const artifactTypes = [];

    for (const item of imports) {
      // Read source
      const content = await fs.readFile(item.sourcePath, 'utf-8');

      // FIX: commander's `--no-normalize` flag populates `options.normalize`
      // (false when the flag is given). The original checked the nonexistent
      // `options.noNormalize`, so normalization could never be skipped.
      const normalized = options.normalize === false
        ? content
        : normalizeContent(content, item.type, path.relative(cwd, item.sourcePath));

      // Determine output path
      // NOTE(review): two imports mapping to the same type share one output
      // file, so the later one overwrites the earlier — confirm intended.
      let outputPath;
      if (item.type === 'custom') {
        outputPath = path.join(customPath, item.customName
          ? `${item.customName.toLowerCase().replace(/\s+/g, '-')}.md`
          : path.basename(item.sourcePath));
      } else {
        outputPath = path.join(outputsPath, item.outputFile);
      }

      // Write normalized content
      await fs.ensureDir(path.dirname(outputPath));
      await fs.writeFile(outputPath, normalized);

      // Preserve original
      const originalFilename = `original_${path.basename(item.sourcePath)}`;
      await fs.copy(item.sourcePath, path.join(sourcesPath, originalFilename));

      artifactTypes.push(item.type);
    }

    // Create metadata
    await createSessionMetadata(
      sessionPath,
      imports.map(i => i.sourcePath),
      artifactTypes,
      cwd
    );

    spinner.succeed(`Created import session: ${chalk.cyan(sessionId)}`);

    console.log(chalk.gray(`\n Session: .adf/sessions/${sessionId}/`));
    console.log(chalk.gray(` Artifacts: ${imports.length}`));
    console.log(chalk.gray(` Types: ${[...new Set(artifactTypes)].join(', ')}`));

    if (skipped.length > 0) {
      console.log(chalk.yellow(` Skipped: ${skipped.length} file(s)`));
    }

    console.log(chalk.green.bold('\n✓ Import complete!'));
    console.log(chalk.gray(' Run "adf deploy <tool>" to deploy imported artifacts.\n'));

  } catch (error) {
    spinner.fail(`Import failed: ${error.message}`);
    throw error;
  }
}
398
+
399
+ /**
400
+ * Find the latest session ID
401
+ * @param {string} cwd - Current working directory
402
+ * @returns {Promise<string|null>} - Latest session ID or null
403
+ */
404
/**
 * Find the latest session ID.
 * Session IDs begin with an ISO-derived timestamp, so the lexicographically
 * greatest directory entry is the most recent session.
 * @param {string} cwd - Current working directory
 * @returns {Promise<string|null>} - Latest session ID or null
 */
async function findLatestSession(cwd) {
  const sessionsDir = path.join(cwd, '.adf', 'sessions');

  if (!await fs.pathExists(sessionsDir)) {
    return null;
  }

  const entries = await fs.readdir(sessionsDir);
  if (entries.length === 0) {
    return null;
  }

  // Equivalent to sort-descending-and-take-first: the string-wise maximum.
  return entries.reduce((latest, id) => (id > latest ? id : latest));
}
420
+
421
+ /**
422
+ * Command handler
423
+ */
424
/**
 * Command handler for `adf import`.
 * Validates that at least one input file was supplied (printing usage and
 * exiting with code 1 otherwise), then delegates to importArtifacts().
 * @param {Array} files - File paths from the command line
 * @param {Object} options - Parsed commander options
 */
async function importCommand(files, options) {
  const hasInput = files && files.length > 0;

  if (!hasInput) {
    console.error(chalk.red('\n❌ Error: Please specify files to import.'));
    console.log(chalk.yellow('Usage: adf import <files...> [options]'));
    console.log(chalk.gray('\nExamples:'));
    const examples = [
      ' adf import ./docs/PRD.md',
      ' adf import ./docs/*.md --interactive',
      ' adf import ./design.md --type architecture\n'
    ];
    for (const example of examples) {
      console.log(chalk.gray(example));
    }
    process.exit(1);
  }

  await importArtifacts(files, options);
}
437
+
438
+ module.exports = importCommand;
439
+ module.exports.importArtifacts = importArtifacts;