musubi-sdd 2.2.0 → 3.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,759 @@
1
+ /**
2
+ * MUSUBI Parser
3
+ *
4
+ * Parses MUSUBI project structure into Intermediate Representation (IR)
5
+ */
6
+
7
+ 'use strict';
8
+
9
+ const fs = require('fs-extra');
10
+ const path = require('path');
11
+ const yaml = require('js-yaml');
12
+ const { createEmptyProjectIR, createEmptyFeatureIR, createRequirementFromEARS } = require('../ir/types');
13
+
14
/**
 * Parse a MUSUBI project into Intermediate Representation (IR).
 *
 * @param {string} projectPath - Path to MUSUBI project root
 * @returns {Promise<import('../ir/types').ProjectIR>} Project IR
 */
async function parseMusubiProject(projectPath) {
  const ir = createEmptyProjectIR();

  // The five section parsers read disjoint files/directories and do not
  // depend on one another, so load them concurrently instead of awaiting
  // each one serially.
  [ir.metadata, ir.constitution, ir.features, ir.templates, ir.memories] =
    await Promise.all([
      parseProjectMetadata(projectPath),
      parseConstitution(projectPath),
      parseFeatures(projectPath),
      parseTemplates(projectPath),
      parseMemories(projectPath),
    ]);

  return ir;
}
39
+
40
/**
 * Parse project metadata from steering/project.yml, falling back to the
 * first level-one heading of steering/product.md for the project name.
 * Parse failures are logged as warnings and the defaults are kept.
 * @param {string} projectPath - Path to MUSUBI project root
 * @returns {Promise<import('../ir/types').ProjectMetadata>}
 */
async function parseProjectMetadata(projectPath) {
  const metadata = {
    name: '',
    version: '1.0.0',
    sourceFormat: 'musubi',
    sourceVersion: '2.2.0',
    convertedAt: new Date(),
    preservedFields: {},
  };

  // Primary source: steering/project.yml
  const ymlFile = path.join(projectPath, 'steering', 'project.yml');
  if (await fs.pathExists(ymlFile)) {
    try {
      const parsed = yaml.load(await fs.readFile(ymlFile, 'utf-8'));
      metadata.name = parsed.project?.name || '';
      metadata.version = parsed.project?.version || '1.0.0';
      // Keep the full document for lossless round-tripping.
      metadata.preservedFields.projectYml = parsed;
    } catch (error) {
      console.warn(`Warning: Failed to parse project.yml: ${error.message}`);
    }
  }

  // Fallback: first "# Title" heading of steering/product.md
  const productFile = path.join(projectPath, 'steering', 'product.md');
  if (!metadata.name && await fs.pathExists(productFile)) {
    try {
      const heading = (await fs.readFile(productFile, 'utf-8')).match(/^#\s+(.+)$/m);
      if (heading) {
        metadata.name = heading[1].trim();
      }
    } catch (error) {
      console.warn(`Warning: Failed to parse product.md: ${error.message}`);
    }
  }

  return metadata;
}
87
+
88
/**
 * Parse constitution from steering/rules/constitution.md.
 *
 * Extracts "## Article N: ..." sections (first line = article name,
 * following prose = description, bullet points = rules) plus an optional
 * "## Governance" section (version override + bullet rules). Returns an
 * empty constitution when the file is missing; parse errors are logged
 * and the partially filled result is returned.
 *
 * @param {string} projectPath
 * @returns {Promise<import('../ir/types').ConstitutionIR>}
 */
async function parseConstitution(projectPath) {
  const constitutionPath = path.join(projectPath, 'steering', 'rules', 'constitution.md');

  const constitution = {
    articles: [],
    corePrinciples: [],
    governance: {
      version: '1.0', // default; overridden by a "version: X.Y" line below
      rules: [],
    },
  };

  // Constitution is optional — return the empty skeleton if absent.
  if (!await fs.pathExists(constitutionPath)) {
    return constitution;
  }

  try {
    const content = await fs.readFile(constitutionPath, 'utf-8');
    constitution.rawContent = content;

    // Parse articles: each "## Article N" heading up to the next "##"
    // heading (or end of file). /s lets the lazy capture span newlines.
    const articleRegex = /##\s+Article\s+(\d+)[:\s]+(.+?)(?=\n##|\n$|$)/gs;
    let match;

    while ((match = articleRegex.exec(content)) !== null) {
      const articleNumber = parseInt(match[1], 10);
      const articleContent = match[2].trim();

      // Extract article name from first line
      const lines = articleContent.split('\n');
      const name = lines[0].trim();

      // Description = everything after the name until the first bullet.
      let description = '';
      let rulesStart = -1;

      for (let i = 1; i < lines.length; i++) {
        if (lines[i].match(/^[-*]\s+/)) {
          rulesStart = i;
          break;
        }
        description += lines[i] + '\n';
      }

      // Extract rules (bullet points from the first bullet onward;
      // non-bullet lines in between are skipped)
      const rules = [];
      if (rulesStart !== -1) {
        for (let i = rulesStart; i < lines.length; i++) {
          const ruleMatch = lines[i].match(/^[-*]\s+(.+)/);
          if (ruleMatch) {
            rules.push(ruleMatch[1].trim());
          }
        }
      }

      constitution.articles.push({
        number: articleNumber,
        name,
        description: description.trim(),
        rules,
      });
    }

    // Parse governance section if present
    const governanceMatch = content.match(/##\s+Governance\s*\n([\s\S]+?)(?=\n##|$)/);
    if (governanceMatch) {
      const governanceContent = governanceMatch[1];
      // Optional "version: X.Y" line overrides the "1.0" default.
      const versionMatch = governanceContent.match(/version[:\s]+(\d+\.\d+)/i);
      if (versionMatch) {
        constitution.governance.version = versionMatch[1];
      }

      // Governance rules are the section's bullet points.
      const ruleLines = governanceContent.match(/^[-*]\s+(.+)/gm);
      if (ruleLines) {
        constitution.governance.rules = ruleLines.map(l => l.replace(/^[-*]\s+/, ''));
      }
    }
  } catch (error) {
    console.warn(`Warning: Failed to parse constitution: ${error.message}`);
  }

  return constitution;
}
177
+
178
/**
 * Parse all features from storage/specs/. Every subdirectory is treated
 * as one feature; an absent specs directory yields an empty list, and
 * read errors are logged as warnings.
 * @param {string} projectPath
 * @returns {Promise<import('../ir/types').FeatureIR[]>}
 */
async function parseFeatures(projectPath) {
  const specsPath = path.join(projectPath, 'storage', 'specs');
  const features = [];

  if (!await fs.pathExists(specsPath)) {
    return features;
  }

  try {
    const entries = await fs.readdir(specsPath, { withFileTypes: true });
    const featureDirs = entries.filter((entry) => entry.isDirectory());

    for (const dir of featureDirs) {
      const parsed = await parseFeature(path.join(specsPath, dir.name), dir.name);
      if (parsed) {
        features.push(parsed);
      }
    }
  } catch (error) {
    console.warn(`Warning: Failed to parse features: ${error.message}`);
  }

  return features;
}
209
+
210
/**
 * Parse a single feature directory. Every artifact is optional; each one
 * that exists is parsed and attached to the corresponding property of
 * the feature IR.
 * @param {string} featurePath
 * @param {string} featureId
 * @returns {Promise<import('../ir/types').FeatureIR|null>}
 */
async function parseFeature(featurePath, featureId) {
  const feature = createEmptyFeatureIR(featureId, featureId);

  // [artifact file/dir, feature property, parser] — processed in order.
  const artifacts = [
    ['spec.md', 'specification', parseSpecification],
    ['plan.md', 'plan', parsePlan],
    ['tasks.md', 'tasks', parseTasks],
    ['research.md', 'research', parseResearch],
    ['data-model.md', 'dataModel', parseDataModel],
    ['contracts', 'contracts', parseContracts],
  ];

  for (const [fileName, property, parser] of artifacts) {
    const target = path.join(featurePath, fileName);
    if (await fs.pathExists(target)) {
      feature[property] = await parser(target);
    }
  }

  return feature;
}
257
+
258
/**
 * Parse a spec.md file into a SpecificationIR.
 *
 * Extracts the title (first "# " heading), the description (prose before
 * the first "##" heading), EARS-style requirements from a
 * "## Requirements" section, and bullets from a "## Success Criteria"
 * section. The full file text is preserved in rawContent.
 *
 * @param {string} specPath
 * @returns {Promise<import('../ir/types').SpecificationIR>}
 */
async function parseSpecification(specPath) {
  const content = await fs.readFile(specPath, 'utf-8');

  const specification = {
    title: '',
    description: '',
    // NOTE(review): userScenarios is never populated in this function —
    // confirm whether scenarios are parsed elsewhere or still TODO.
    userScenarios: [],
    requirements: [],
    successCriteria: [],
    rawContent: content,
  };

  // Extract title from first heading
  const titleMatch = content.match(/^#\s+(.+)$/m);
  if (titleMatch) {
    specification.title = titleMatch[1].trim();
  }

  // Extract description (content before first ## heading)
  const descMatch = content.match(/^#\s+.+\n([\s\S]+?)(?=\n##|$)/);
  if (descMatch) {
    specification.description = descMatch[1].trim();
  }

  // Parse EARS requirements
  const requirementsSection = content.match(/##\s+Requirements?\s*\n([\s\S]+?)(?=\n##[^#]|$)/i);
  if (requirementsSection) {
    const reqContent = requirementsSection[1];

    // Match requirement patterns like REQ-001, REQ-P0-001, etc.
    // Use greedy match until next ### or ## (non-###) heading
    const reqRegex = /###?\s+(REQ[-\w]+)[:\s]+([^#]+?)(?=\n###?\s+REQ|\n##[^#]|$)/gs;
    let match;

    while ((match = reqRegex.exec(reqContent)) !== null) {
      const reqId = match[1];
      const reqBody = match[2].trim();

      // Extract EARS statement: trigger keyword ... SHALL ... up to the
      // first period; delegated to the shared IR constructor when found.
      const earsMatch = reqBody.match(/((?:WHEN|WHILE|WHERE|IF)[\s\S]+?SHALL[\s\S]+?\.)/i);
      if (earsMatch) {
        specification.requirements.push(createRequirementFromEARS(reqId, earsMatch[1]));
      } else {
        // Simple requirement without EARS pattern — stored verbatim as
        // a "ubiquitous" requirement with priority derived from the ID.
        specification.requirements.push({
          id: reqId,
          title: '',
          pattern: 'ubiquitous',
          priority: extractPriority(reqId),
          action: reqBody,
          statement: reqBody,
          acceptanceCriteria: [],
        });
      }
    }
  }

  // Parse success criteria (bullet list under "## Success Criteria")
  const successSection = content.match(/##\s+Success\s+Criteria\s*\n([\s\S]+?)(?=\n##|$)/i);
  if (successSection) {
    const criteriaContent = successSection[1];
    const criteria = criteriaContent.match(/^[-*]\s+(.+)/gm);
    if (criteria) {
      specification.successCriteria = criteria.map(c => c.replace(/^[-*]\s+/, ''));
    }
  }

  return specification;
}
332
+
333
/**
 * Derive a priority from a requirement ID, e.g. "REQ-P0-001" -> "P0".
 * IDs without a P<digit> marker default to "P1".
 * @param {string} reqId
 * @returns {import('../ir/types').Priority}
 */
function extractPriority(reqId) {
  const marker = /P(\d)/.exec(reqId);
  return marker ? `P${marker[1]}` : 'P1';
}
345
+
346
+ /**
347
+ * Parse plan file
348
+ * @param {string} planPath
349
+ * @returns {Promise<import('../ir/types').PlanIR>}
350
+ */
351
+ async function parsePlan(planPath) {
352
+ const content = await fs.readFile(planPath, 'utf-8');
353
+
354
+ const plan = {
355
+ summary: '',
356
+ technicalContext: {
357
+ language: '',
358
+ version: '',
359
+ framework: '',
360
+ dependencies: [],
361
+ testing: '',
362
+ targetPlatform: '',
363
+ },
364
+ constitutionCheck: [],
365
+ projectStructure: {
366
+ type: 'single',
367
+ directories: [],
368
+ },
369
+ phases: [],
370
+ rawContent: content,
371
+ };
372
+
373
+ // Extract summary from first paragraph
374
+ const summaryMatch = content.match(/^#\s+.+\n([\s\S]+?)(?=\n##|$)/);
375
+ if (summaryMatch) {
376
+ plan.summary = summaryMatch[1].trim();
377
+ }
378
+
379
+ // Parse technical context
380
+ const techSection = content.match(/##\s+Technical\s+Context\s*\n([\s\S]+?)(?=\n##|$)/i);
381
+ if (techSection) {
382
+ const techContent = techSection[1];
383
+
384
+ // Extract key-value pairs
385
+ const langMatch = techContent.match(/language[:\s]+(.+)/i);
386
+ if (langMatch) plan.technicalContext.language = langMatch[1].trim();
387
+
388
+ const versionMatch = techContent.match(/version[:\s]+(.+)/i);
389
+ if (versionMatch) plan.technicalContext.version = versionMatch[1].trim();
390
+
391
+ const frameworkMatch = techContent.match(/framework[:\s]+(.+)/i);
392
+ if (frameworkMatch) plan.technicalContext.framework = frameworkMatch[1].trim();
393
+
394
+ const testingMatch = techContent.match(/testing[:\s]+(.+)/i);
395
+ if (testingMatch) plan.technicalContext.testing = testingMatch[1].trim();
396
+
397
+ const platformMatch = techContent.match(/platform[:\s]+(.+)/i);
398
+ if (platformMatch) plan.technicalContext.targetPlatform = platformMatch[1].trim();
399
+ }
400
+
401
+ // Parse phases
402
+ const phasesSection = content.match(/##\s+Phases?\s*\n([\s\S]+?)(?=\n##|$)/i);
403
+ if (phasesSection) {
404
+ const phasesContent = phasesSection[1];
405
+ const phaseRegex = /###\s+Phase\s+(\d+)[:\s]+(.+?)(?=\n###|\n##|$)/gs;
406
+ let match;
407
+
408
+ while ((match = phaseRegex.exec(phasesContent)) !== null) {
409
+ const phaseNumber = parseInt(match[1], 10);
410
+ const phaseContent = match[2].trim();
411
+ const lines = phaseContent.split('\n');
412
+
413
+ plan.phases.push({
414
+ number: phaseNumber,
415
+ name: lines[0].trim(),
416
+ purpose: '',
417
+ outputs: [],
418
+ tasks: [],
419
+ });
420
+ }
421
+ }
422
+
423
+ return plan;
424
+ }
425
+
426
/**
 * Parse a tasks.md checklist into TaskIR entries.
 *
 * Recognizes lines of the form `- [ ] T001: Description` (`[x]`/`[X]`
 * mark completion). Each task's phase is the nearest `## Phase N`
 * heading above it (default 1). A `[P]` marker flags a parallelizable
 * task and `[USn]` links a user story; both markers are stripped from
 * the stored description.
 *
 * @param {string} tasksPath - Path to tasks.md
 * @returns {Promise<import('../ir/types').TaskIR[]>}
 */
async function parseTasks(tasksPath) {
  const content = await fs.readFile(tasksPath, 'utf-8');
  const tasks = [];

  // Pre-scan all phase headings once. The previous implementation
  // re-matched /##\s+Phase\s+\d+/ against content.slice(0, match.index)
  // for every task, which is accidentally O(n^2) on large files.
  const phaseHeadings = [];
  const phaseHeadingRegex = /##\s+Phase\s+(\d+)/gi;
  let headingMatch;
  while ((headingMatch = phaseHeadingRegex.exec(content)) !== null) {
    phaseHeadings.push({ index: headingMatch.index, phase: parseInt(headingMatch[1], 10) });
  }

  // Match task lines like: - [ ] T001: Description
  const taskRegex = /^[-*]\s+\[([xX ])\]\s+(T\d+)[:\s]+(.+)$/gm;
  let match;

  while ((match = taskRegex.exec(content)) !== null) {
    const completed = match[1].toLowerCase() === 'x';
    const taskId = match[2];
    const description = match[3].trim();

    // Phase = last "## Phase N" heading that starts before this task.
    let phase = 1;
    for (const heading of phaseHeadings) {
      if (heading.index >= match.index) break;
      phase = heading.phase;
    }

    // Check for parallel marker [P]
    const parallel = description.includes('[P]');

    // Extract file path if present, e.g. "... at src/parser.js"
    const filePathMatch = description.match(/(?:at|in|path:)\s+([^\s]+)/i);
    const filePath = filePathMatch ? filePathMatch[1] : undefined;

    // Extract user story reference, e.g. "[US3]"
    const storyMatch = description.match(/\[US\d+\]/);
    const userStory = storyMatch ? storyMatch[0].replace(/[\[\]]/g, '') : undefined;

    tasks.push({
      id: taskId,
      description: description.replace(/\[P\]|\[US\d+\]/g, '').trim(),
      phase,
      userStory,
      parallel,
      filePath,
      completed,
    });
  }

  return tasks;
}
479
+
480
/**
 * Parse a research.md file into a ResearchIR.
 *
 * Extracts "### Topic" entries from a "## Decisions" section (with
 * optional "Decision:" / "Rationale:" lines) and "### Name" entries from
 * an "## Alternatives" section (with pros/cons bullets and an optional
 * rejected flag + reason). The full text is kept in rawContent.
 *
 * @param {string} researchPath
 * @returns {Promise<import('../ir/types').ResearchIR>}
 */
async function parseResearch(researchPath) {
  const content = await fs.readFile(researchPath, 'utf-8');

  const research = {
    decisions: [],
    alternatives: [],
    rawContent: content,
  };

  // Parse decisions section
  const decisionsSection = content.match(/##\s+Decisions?\s*\n([\s\S]+?)(?=\n##|$)/i);
  if (decisionsSection) {
    const decisionContent = decisionsSection[1];
    // Each "### Topic" subsection up to the next "###" heading.
    const decisionRegex = /###\s+(.+?)\n([\s\S]+?)(?=\n###|$)/g;
    let match;

    while ((match = decisionRegex.exec(decisionContent)) !== null) {
      const topic = match[1].trim();
      const body = match[2].trim();

      // Optional labeled lines; the decision falls back to the body's
      // first line when no "Decision:" label is present.
      const decisionMatch = body.match(/decision[:\s]+(.+)/i);
      const rationaleMatch = body.match(/rationale[:\s]+(.+)/i);

      research.decisions.push({
        topic,
        decision: decisionMatch ? decisionMatch[1].trim() : body.split('\n')[0],
        rationale: rationaleMatch ? rationaleMatch[1].trim() : '',
      });
    }
  }

  // Parse alternatives section
  const alternativesSection = content.match(/##\s+Alternatives?\s*\n([\s\S]+?)(?=\n##|$)/i);
  if (alternativesSection) {
    const altContent = alternativesSection[1];
    const altRegex = /###\s+(.+?)\n([\s\S]+?)(?=\n###|$)/g;
    let match;

    while ((match = altRegex.exec(altContent)) !== null) {
      const name = match[1].trim();
      const body = match[2].trim();

      // Extract pros and cons: each block runs until the other label,
      // a "rejected" label, or end of body (case-insensitive).
      const prosMatch = body.match(/pros?[:\s]*([\s\S]+?)(?=cons?|rejected|$)/i);
      const consMatch = body.match(/cons?[:\s]*([\s\S]+?)(?=pros?|rejected|$)/i);
      const rejectedMatch = body.match(/rejected[:\s]*(yes|no|true|false)/i);
      const reasonMatch = body.match(/reason[:\s]+(.+)/i);

      // Bullet items inside each block; missing blocks yield [].
      const pros = prosMatch
        ? (prosMatch[1].match(/^[-*]\s+(.+)/gm) || []).map(p => p.replace(/^[-*]\s+/, ''))
        : [];
      const cons = consMatch
        ? (consMatch[1].match(/^[-*]\s+(.+)/gm) || []).map(c => c.replace(/^[-*]\s+/, ''))
        : [];

      research.alternatives.push({
        name,
        pros,
        cons,
        // "yes"/"true" (any case) count as rejected; absent label = false.
        rejected: rejectedMatch ? ['yes', 'true'].includes(rejectedMatch[1].toLowerCase()) : false,
        reason: reasonMatch ? reasonMatch[1].trim() : undefined,
      });
    }
  }

  return research;
}
552
+
553
/**
 * Parse a data-model.md file into entities and relationships.
 *
 * Entities come from "### Entity: Name" (or "### Name") headings with
 * "- field: type" bullets. Relationships come from a "## Relationships"
 * section with lines like "User has many Post" or "Post belongs to
 * User" (also "->"/"→" arrows).
 *
 * @param {string} dataModelPath
 * @returns {Promise<import('../ir/types').DataModelIR>}
 */
async function parseDataModel(dataModelPath) {
  const content = await fs.readFile(dataModelPath, 'utf-8');

  const dataModel = {
    entities: [],
    relationships: [],
    rawContent: content,
  };

  // Parse entities (look for ### Entity: Name or ### Name patterns)
  const entityRegex = /###\s+(?:Entity:?\s+)?(\w+)\s*\n([\s\S]+?)(?=\n###|$)/gi;
  let match;

  while ((match = entityRegex.exec(content)) !== null) {
    const name = match[1];
    const body = match[2].trim();

    // Extract "- name: type" field bullets from the entity body.
    const fields = [];
    const fieldRegex = /[-*]\s+(\w+):\s+(.+)/g;
    let fieldMatch;

    while ((fieldMatch = fieldRegex.exec(body)) !== null) {
      fields.push({
        name: fieldMatch[1],
        type: fieldMatch[2].trim(),
        // required/unique are not expressed in this format — defaults.
        required: false,
        unique: false,
      });
    }

    dataModel.entities.push({
      name,
      description: '',
      fields,
    });
  }

  // Parse relationships
  const relationshipSection = content.match(/##\s+Relationships?\s*\n([\s\S]+?)(?=\n##|$)/i);
  if (relationshipSection) {
    const relContent = relationshipSection[1];
    const relRegex = /(\w+)\s*(?:→|->|has many|has one|belongs to)\s*(\w+)/gi;
    let relMatch;

    while ((relMatch = relRegex.exec(relContent)) !== null) {
      dataModel.relationships.push({
        from: relMatch[1],
        to: relMatch[2],
        // Bug fix: classify by THIS relationship's matched text
        // (relMatch[0]), not the whole section — previously one
        // "has many" line made every relationship one-to-many.
        type: relMatch[0].toLowerCase().includes('many') ? 'one-to-many' : 'one-to-one',
      });
    }
  }

  return dataModel;
}
614
+
615
/**
 * Parse contract documents (*.md) from a feature's contracts directory.
 * Contract type is inferred from keywords in the file content; read
 * failures are logged and yield an empty list.
 * @param {string} contractsPath
 * @returns {Promise<import('../ir/types').ContractIR[]>}
 */
async function parseContracts(contractsPath) {
  const contracts = [];

  try {
    const entries = await fs.readdir(contractsPath, { withFileTypes: true });

    for (const entry of entries) {
      if (entry.isFile() && entry.name.endsWith('.md')) {
        const contractFile = path.join(contractsPath, entry.name);
        const content = await fs.readFile(contractFile, 'utf-8');

        // Keyword heuristic; first match wins, so REST shadows the rest.
        let type = 'other';
        if (content.includes('REST') || content.includes('GET') || content.includes('POST')) {
          type = 'rest';
        } else if (content.includes('GraphQL') || content.includes('query') || content.includes('mutation')) {
          type = 'graphql';
        } else if (content.includes('gRPC')) {
          type = 'grpc';
        } else if (content.includes('WebSocket')) {
          type = 'websocket';
        }

        contracts.push({
          type,
          // Bug fix: strip only the trailing ".md". The previous
          // replace('.md', '') removed the FIRST occurrence, mangling
          // names like "spec.mdx.md" into "specx.md".
          name: entry.name.slice(0, -3),
          definition: {},
          rawContent: content,
        });
      }
    }
  } catch (error) {
    console.warn(`Warning: Failed to parse contracts: ${error.message}`);
  }

  return contracts;
}
657
+
658
/**
 * Parse document templates (*.md) from steering/templates/.
 * Template type ("spec", "plan", "tasks") is inferred from the filename;
 * anything else is typed "other". An absent directory yields an empty
 * list and read failures are logged as warnings.
 * @param {string} projectPath
 * @returns {Promise<import('../ir/types').TemplateIR[]>}
 */
async function parseTemplates(projectPath) {
  const templatesPath = path.join(projectPath, 'steering', 'templates');
  const templates = [];

  if (!await fs.pathExists(templatesPath)) {
    return templates;
  }

  try {
    const entries = await fs.readdir(templatesPath, { withFileTypes: true });

    for (const entry of entries) {
      if (entry.isFile() && entry.name.endsWith('.md')) {
        const templateFile = path.join(templatesPath, entry.name);
        const content = await fs.readFile(templateFile, 'utf-8');

        // Determine template type from the filename.
        let type = 'other';
        if (entry.name.includes('spec')) {
          type = 'spec';
        } else if (entry.name.includes('plan')) {
          type = 'plan';
        } else if (entry.name.includes('task')) {
          type = 'tasks';
        }

        templates.push({
          // Bug fix: strip only the trailing ".md". The previous
          // replace('.md', '') removed the FIRST occurrence, mangling
          // names like "spec.mdx.md".
          name: entry.name.slice(0, -3),
          type,
          content,
        });
      }
    }
  } catch (error) {
    console.warn(`Warning: Failed to parse templates: ${error.message}`);
  }

  return templates;
}
702
+
703
/**
 * Parse memory files (*.md) from steering/memories/. Each file becomes
 * one memory with a single entry; the category ("decision", "learning",
 * or the default "context") is inferred from the filename.
 * @param {string} projectPath
 * @returns {Promise<import('../ir/types').MemoryIR[]>}
 */
async function parseMemories(projectPath) {
  const memoriesPath = path.join(projectPath, 'steering', 'memories');
  const memories = [];

  if (!await fs.pathExists(memoriesPath)) {
    return memories;
  }

  try {
    const entries = await fs.readdir(memoriesPath, { withFileTypes: true });

    for (const entry of entries) {
      if (!entry.isFile() || !entry.name.endsWith('.md')) {
        continue;
      }

      const content = await fs.readFile(path.join(memoriesPath, entry.name), 'utf-8');

      // Classify by filename keyword; default bucket is "context".
      let category = 'context';
      if (entry.name.includes('decision')) {
        category = 'decision';
      } else if (entry.name.includes('learning')) {
        category = 'learning';
      }

      memories.push({
        category,
        entries: [{ content, source: entry.name }],
      });
    }
  } catch (error) {
    console.warn(`Warning: Failed to parse memories: ${error.message}`);
  }

  return memories;
}
744
+
745
// Public API. `parseMusubiProject` is the main entry point; the
// section-level parsers are also exported individually so they can be
// unit-tested and reused by other converters.
module.exports = {
  parseMusubiProject,
  parseProjectMetadata,
  parseConstitution,
  parseFeatures,
  parseFeature,
  parseSpecification,
  parsePlan,
  parseTasks,
  parseResearch,
  parseDataModel,
  parseContracts,
  parseTemplates,
  parseMemories,
};