@wundam/orchex 1.0.0-rc.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (104)
  1. package/LICENSE +65 -0
  2. package/README.md +332 -0
  3. package/bin/orchex.js +2 -0
  4. package/dist/artifacts.d.ts +132 -0
  5. package/dist/artifacts.js +832 -0
  6. package/dist/claude-executor.d.ts +31 -0
  7. package/dist/claude-executor.js +200 -0
  8. package/dist/commands.d.ts +36 -0
  9. package/dist/commands.js +264 -0
  10. package/dist/config.d.ts +100 -0
  11. package/dist/config.js +172 -0
  12. package/dist/context-builder.d.ts +46 -0
  13. package/dist/context-builder.js +506 -0
  14. package/dist/cost.d.ts +29 -0
  15. package/dist/cost.js +60 -0
  16. package/dist/execution-broadcaster.d.ts +18 -0
  17. package/dist/execution-broadcaster.js +17 -0
  18. package/dist/executors/base.d.ts +99 -0
  19. package/dist/executors/base.js +206 -0
  20. package/dist/executors/circuit-breaker.d.ts +36 -0
  21. package/dist/executors/circuit-breaker.js +109 -0
  22. package/dist/executors/deepseek-executor.d.ts +22 -0
  23. package/dist/executors/deepseek-executor.js +145 -0
  24. package/dist/executors/gemini-executor.d.ts +20 -0
  25. package/dist/executors/gemini-executor.js +176 -0
  26. package/dist/executors/index.d.ts +81 -0
  27. package/dist/executors/index.js +193 -0
  28. package/dist/executors/ollama-executor.d.ts +25 -0
  29. package/dist/executors/ollama-executor.js +184 -0
  30. package/dist/executors/openai-executor.d.ts +22 -0
  31. package/dist/executors/openai-executor.js +142 -0
  32. package/dist/index.d.ts +1 -0
  33. package/dist/index.js +115 -0
  34. package/dist/intelligence/anti-pattern-detector.d.ts +117 -0
  35. package/dist/intelligence/anti-pattern-detector.js +327 -0
  36. package/dist/intelligence/budget-enforcer.d.ts +119 -0
  37. package/dist/intelligence/budget-enforcer.js +226 -0
  38. package/dist/intelligence/context-optimizer.d.ts +111 -0
  39. package/dist/intelligence/context-optimizer.js +282 -0
  40. package/dist/intelligence/cost-tracker.d.ts +114 -0
  41. package/dist/intelligence/cost-tracker.js +183 -0
  42. package/dist/intelligence/deliverable-extractor.d.ts +134 -0
  43. package/dist/intelligence/deliverable-extractor.js +909 -0
  44. package/dist/intelligence/dependency-inferrer.d.ts +87 -0
  45. package/dist/intelligence/dependency-inferrer.js +403 -0
  46. package/dist/intelligence/diagnostics.d.ts +25 -0
  47. package/dist/intelligence/diagnostics.js +36 -0
  48. package/dist/intelligence/error-analyzer.d.ts +7 -0
  49. package/dist/intelligence/error-analyzer.js +76 -0
  50. package/dist/intelligence/file-chunker.d.ts +15 -0
  51. package/dist/intelligence/file-chunker.js +64 -0
  52. package/dist/intelligence/fix-stream-manager.d.ts +59 -0
  53. package/dist/intelligence/fix-stream-manager.js +212 -0
  54. package/dist/intelligence/heuristics.d.ts +23 -0
  55. package/dist/intelligence/heuristics.js +124 -0
  56. package/dist/intelligence/learning-engine.d.ts +157 -0
  57. package/dist/intelligence/learning-engine.js +433 -0
  58. package/dist/intelligence/learning-feedback.d.ts +96 -0
  59. package/dist/intelligence/learning-feedback.js +202 -0
  60. package/dist/intelligence/pattern-analyzer.d.ts +35 -0
  61. package/dist/intelligence/pattern-analyzer.js +189 -0
  62. package/dist/intelligence/plan-parser.d.ts +124 -0
  63. package/dist/intelligence/plan-parser.js +498 -0
  64. package/dist/intelligence/planner.d.ts +29 -0
  65. package/dist/intelligence/planner.js +86 -0
  66. package/dist/intelligence/self-healer.d.ts +16 -0
  67. package/dist/intelligence/self-healer.js +84 -0
  68. package/dist/intelligence/slicing-metrics.d.ts +62 -0
  69. package/dist/intelligence/slicing-metrics.js +202 -0
  70. package/dist/intelligence/slicing-templates.d.ts +81 -0
  71. package/dist/intelligence/slicing-templates.js +420 -0
  72. package/dist/intelligence/split-suggester.d.ts +69 -0
  73. package/dist/intelligence/split-suggester.js +176 -0
  74. package/dist/intelligence/stream-generator.d.ts +90 -0
  75. package/dist/intelligence/stream-generator.js +452 -0
  76. package/dist/logger.d.ts +34 -0
  77. package/dist/logger.js +83 -0
  78. package/dist/logging.d.ts +5 -0
  79. package/dist/logging.js +38 -0
  80. package/dist/manifest.d.ts +56 -0
  81. package/dist/manifest.js +254 -0
  82. package/dist/metrics.d.ts +35 -0
  83. package/dist/metrics.js +75 -0
  84. package/dist/orchestrator.d.ts +35 -0
  85. package/dist/orchestrator.js +723 -0
  86. package/dist/ownership.d.ts +44 -0
  87. package/dist/ownership.js +250 -0
  88. package/dist/semaphore.d.ts +12 -0
  89. package/dist/semaphore.js +34 -0
  90. package/dist/telemetry/telemetry-types.d.ts +85 -0
  91. package/dist/telemetry/telemetry-types.js +1 -0
  92. package/dist/tier-gating.d.ts +24 -0
  93. package/dist/tier-gating.js +88 -0
  94. package/dist/tiers.d.ts +92 -0
  95. package/dist/tiers.js +108 -0
  96. package/dist/tools.d.ts +18 -0
  97. package/dist/tools.js +1363 -0
  98. package/dist/types.d.ts +740 -0
  99. package/dist/types.js +160 -0
  100. package/dist/utils/ownership-validator.d.ts +6 -0
  101. package/dist/utils/ownership-validator.js +21 -0
  102. package/dist/waves.d.ts +21 -0
  103. package/dist/waves.js +146 -0
  104. package/package.json +120 -0
@@ -0,0 +1,90 @@
1
/**
 * Stream definition generator.
 * Converts deliverables to stream definitions ready for orchex init.
 */
import type { StreamDefinitionInput } from '../types.js';
import type { Deliverable } from './deliverable-extractor.js';
import type { DependencyGraph } from './dependency-inferrer.js';
import { type StreamAnalysis } from './anti-pattern-detector.js';
import type { LearnDiagnostics } from './diagnostics.js';
/**
 * Options for stream generation.
 */
export interface StreamGeneratorOptions {
    /** Default verify commands for all streams (used when a deliverable carries none) */
    defaultVerify?: string[];
    /** Whether to validate against anti-patterns (default: true) */
    validateAntiPatterns?: boolean;
    /** Minimum plan length to avoid 'vague_plan' anti-pattern (default: 5) */
    minPlanLength?: number;
    /** Estimate timeout based on category (default: true) */
    estimateTimeout?: boolean;
    /** Project directory for path validation (optional; enables on-disk checks) */
    projectDir?: string;
    /** Diagnostics collector for the learn pipeline (warnings are mirrored into it) */
    diagnostics?: LearnDiagnostics;
}
/**
 * Result of stream generation.
 */
export interface GeneratedStreams {
    /** Stream definitions ready for orchex init */
    streams: Record<string, StreamDefinitionInput>;
    /** Any validation warnings */
    warnings: string[];
    /** Anti-pattern analysis results (only present when validation was enabled) */
    antiPatterns?: StreamAnalysis[];
    /** Total stream count */
    count: number;
}
/**
 * Generate a plan string from deliverable content.
 * Ensures the plan is descriptive enough to avoid 'vague_plan' anti-pattern.
 * Includes code examples from the plan document when available.
 *
 * @param minLength minimum word count before padding kicks in (default: 5)
 * @param maxCodeChars character budget for appended code examples (default: 4000)
 */
export declare function generatePlan(deliverable: Deliverable, minLength?: number, maxCodeChars?: number): string;
/**
 * Generate default verify commands based on category and owned files.
 */
export declare function generateVerifyCommands(deliverable: Deliverable): string[];
/**
 * Convert a single deliverable to a stream definition.
 */
export declare function deliverableToStream(deliverable: Deliverable, deps: string[], options?: StreamGeneratorOptions): StreamDefinitionInput;
/**
 * Generate stream definitions from deliverables and dependency graph.
 */
export declare function generateStreams(deliverables: Deliverable[], graph: DependencyGraph, options?: StreamGeneratorOptions): GeneratedStreams;
/**
 * Format generated streams for review.
 */
export declare function formatStreamsForReview(result: GeneratedStreams): string;
/**
 * Convert generated streams to the format expected by orchex init.
 */
export declare function toInitFormat(result: GeneratedStreams): Record<string, StreamDefinitionInput>;
/**
 * Extract prerequisite install commands from stream plan text.
 *
 * Scans all stream plans for:
 * 1. Explicit commands: "npm install X Y Z"
 * 2. Structured blocks: "Install dependencies:\n  - X\n  - Y"
 *
 * Returns deduplicated install commands for the main session to run.
 */
export declare function extractPrerequisites(streams: Record<string, StreamDefinitionInput>): string[];
/**
 * Result of path validation.
 */
export interface PathValidationResult {
    /** Streams with corrected paths */
    streams: Record<string, StreamDefinitionInput>;
    /** Warnings about path issues */
    warnings: string[];
}
/**
 * Validate and optionally correct file paths in stream definitions.
 * When projectDir is available, checks that owns/reads paths exist on disk.
 * Attempts fuzzy correction for reads paths when exactly one match is found.
 * When projectDir is omitted, the input streams are returned unchanged.
 */
export declare function validateAndCorrectPaths(streams: Record<string, StreamDefinitionInput>, projectDir?: string): PathValidationResult;
@@ -0,0 +1,452 @@
1
+ /**
2
+ * Stream definition generator.
3
+ * Converts deliverables to stream definitions ready for orchex init.
4
+ */
5
+ import * as fs from 'node:fs';
6
+ import * as path from 'node:path';
7
+ import { createDetector } from './anti-pattern-detector.js';
8
/**
 * Default timeout estimates by category (in milliseconds).
 * Consumed by deliverableToStream() when options.estimateTimeout is enabled;
 * the base value is scaled up for deliverables that own many files.
 */
const TIMEOUT_BY_CATEGORY = {
    code: 120000, // 2 minutes - typical implementation
    test: 90000, // 1.5 minutes - tests are usually simpler
    docs: 60000, // 1 minute - documentation
    tutorial: 90000, // 1.5 minutes - tutorials need examples
    'integration-guide': 90000,
    'api-reference': 60000,
    migration: 180000, // 3 minutes - migrations can be complex
    other: 120000,
};
21
/**
 * Generate a plan string from deliverable content.
 * Ensures the plan is descriptive enough to avoid 'vague_plan' anti-pattern.
 * Includes code examples from the plan document when available.
 *
 * @param deliverable source deliverable (description, category, ownedFiles, codeExamples)
 * @param minLength minimum word count; shorter descriptions get padded with an
 *   action verb, the deliverable name, and the target files
 * @param maxCodeChars character budget for appended code example blocks
 * @returns the synthesized plan text
 */
export function generatePlan(deliverable, minLength = 5, maxCodeChars = 4000) {
    // Start with the description
    let plan = deliverable.description.trim();
    // If description is too short, add context from name and category
    const words = plan.split(/\s+/).filter(w => w.length > 0);
    if (words.length < minLength) {
        const verb = getActionVerb(deliverable.category);
        const files = deliverable.ownedFiles.join(', ') || 'the target files';
        // Only splice the description in when it is non-empty; previously an
        // empty description produced a dangling ". ." in the padded plan.
        const descPart = plan.length > 0 ? ` ${plan}.` : '';
        plan = `${verb} ${deliverable.name}.${descPart} Target: ${files}`;
    }
    // Append code examples if present (these are the LLM's most precise instructions)
    if (deliverable.codeExamples.length > 0) {
        const codeBlocks = [];
        let totalChars = 0;
        for (const ex of deliverable.codeExamples) {
            const block = `\`\`\`${ex.language || ''}\n${ex.code}\n\`\`\``;
            // Stop at the first example that would blow the character budget.
            if (totalChars + block.length > maxCodeChars)
                break;
            codeBlocks.push(block);
            totalChars += block.length;
        }
        if (codeBlocks.length > 0) {
            plan += '\n\nReference code:\n' + codeBlocks.join('\n\n');
        }
    }
    return plan;
}
53
/**
 * Map of deliverable category to the action verb used when synthesizing a plan.
 */
const CATEGORY_VERBS = {
    code: 'Implement',
    test: 'Write tests for',
    docs: 'Document',
    tutorial: 'Create tutorial for',
    'integration-guide': 'Write integration guide for',
    'api-reference': 'Generate API reference for',
    migration: 'Migrate',
};
/**
 * Get an action verb appropriate for the category.
 * Unknown categories fall back to the generic 'Complete'.
 */
function getActionVerb(category) {
    return CATEGORY_VERBS[category] ?? 'Complete';
}
68
/**
 * Generate default verify commands based on category and owned files.
 * Streams owning code files get a build check; test streams additionally get
 * a test run, scoped to the owned test file when one can be identified.
 */
export function generateVerifyCommands(deliverable) {
    const CODE_EXTENSIONS = ['.ts', '.tsx', '.js', '.jsx'];
    const commands = [];
    const ownsCode = deliverable.ownedFiles.some(f => CODE_EXTENSIONS.some(ext => f.endsWith(ext)));
    if (ownsCode) {
        commands.push('npm run build');
    }
    if (deliverable.category === 'test') {
        const testFile = deliverable.ownedFiles.find(f => f.includes('.test.'));
        commands.push(testFile ? `npm test -- ${testFile}` : 'npm test');
    }
    return commands;
}
90
/**
 * Convert a single deliverable to a stream definition.
 *
 * @param deliverable extracted deliverable; may carry YAML-sourced `_verify`
 *   and `_setup` arrays (attached by yamlToDeliverable)
 * @param deps stream ids this stream depends on
 * @param options generation options (plan length, timeout estimation, defaults)
 * @returns a stream definition; optional fields are omitted (undefined) rather
 *   than set to empty arrays
 */
export function deliverableToStream(deliverable, deps, options = {}) {
    const { minPlanLength = 5, estimateTimeout = true, defaultVerify } = options;
    // Check for YAML-sourced verify/setup (attached by yamlToDeliverable)
    const yamlExtras = deliverable;
    const verify = yamlExtras._verify || defaultVerify || generateVerifyCommands(deliverable);
    const setup = yamlExtras._setup;
    // Use description directly if it came from YAML (already a plan).
    // YAML-sourced deliverables have codeExamples: [] (set in yamlToDeliverable),
    // so bypassing generatePlan() here does not drop code examples.
    // NOTE(review): minPlanLength is compared against characters here but
    // against word count inside generatePlan() — confirm which unit is intended.
    const hasYamlPlan = yamlExtras._verify !== undefined || yamlExtras._setup !== undefined;
    const plan = hasYamlPlan && deliverable.description.length >= minPlanLength
        ? deliverable.description
        : generatePlan(deliverable, minPlanLength);
    const stream = {
        name: deliverable.name,
        deps: deps.length > 0 ? deps : undefined,
        owns: deliverable.ownedFiles.length > 0 ? deliverable.ownedFiles : undefined,
        reads: deliverable.readFiles.length > 0 ? deliverable.readFiles : undefined,
        plan,
        verify: verify.length > 0 ? verify : undefined,
        setup: setup && setup.length > 0 ? setup : undefined,
    };
    // Add timeout estimate if enabled
    if (estimateTimeout) {
        // Fall back to the 'other' baseline for unmapped categories; previously
        // an unknown category yielded timeoutMs = NaN (undefined * multiplier).
        const timeout = TIMEOUT_BY_CATEGORY[deliverable.category] ?? TIMEOUT_BY_CATEGORY.other;
        // Increase timeout for deliverables with many files
        const fileMultiplier = Math.max(1, Math.ceil(deliverable.ownedFiles.length / 2));
        stream.timeoutMs = timeout * fileMultiplier;
    }
    return stream;
}
124
/**
 * Generate stream definitions from deliverables and dependency graph.
 *
 * For each deliverable this function:
 *   1. builds a stream via deliverableToStream() with deps taken from the graph;
 *   2. runs anti-pattern analysis when validateAntiPatterns (default true);
 *   3. emits heuristic 'too_complex' warnings based on sub-section count and plan size;
 *   4. when options.projectDir is set, warns about large owned files on disk and,
 *      after all streams are built, validates/corrects paths via validateAndCorrectPaths().
 *
 * Some warnings are mirrored into options.diagnostics.warnings when a
 * diagnostics collector is provided.
 */
export function generateStreams(deliverables, graph, options = {}) {
    const { validateAntiPatterns = true } = options;
    const streams = {};
    const warnings = [];
    const antiPatterns = [];
    // Check for cycles
    if (!graph.isAcyclic) {
        warnings.push(`Dependency graph has cycles: ${graph.cycles.map(c => c.join(' → ')).join('; ')}`);
    }
    // Create detector once for all streams
    const detector = validateAntiPatterns ? createDetector() : null;
    // Generate streams
    for (const deliverable of deliverables) {
        const deps = graph.dependencies.get(deliverable.id) || [];
        const stream = deliverableToStream(deliverable, deps, options);
        // Validate against anti-patterns if enabled
        if (detector) {
            const analysis = detector.analyzeStream(deliverable.id, {
                name: stream.name,
                deps: stream.deps || [],
                owns: stream.owns || [],
                reads: stream.reads || [],
                plan: stream.plan,
                verify: stream.verify || [],
            });
            // Only streams with at least one issue are recorded/reported.
            if (analysis.issues.length > 0) {
                antiPatterns.push(analysis);
                for (const issue of analysis.issues) {
                    warnings.push(`${deliverable.id}: ${issue.type} - ${issue.message}`);
                }
            }
        }
        // Check for too_complex (independent of anti-pattern detector)
        // NOTE(review): the 2500/3500-char and 3/5-sub-section thresholds are
        // heuristics; confirm they track the executor's practical limits.
        const childCount = deliverable.childCount;
        const planLength = stream.plan?.length ?? 0;
        const ownsCount = (stream.owns || []).length;
        if (childCount !== undefined && childCount >= 5 && planLength > 2500) {
            warnings.push(`${deliverable.id}: too_complex - ${childCount} sub-sections, ${ownsCount} owned files, ~${planLength} char plan. ` +
                `Consider: use YAML stream definitions for manual decomposition, or set deliverable_level: 3`);
        }
        else if (childCount !== undefined && childCount >= 3 && planLength > 3500) {
            warnings.push(`${deliverable.id}: too_complex - ${childCount} sub-sections, ~${planLength} char plan. ` +
                `Consider: use YAML stream definitions for finer control`);
        }
        // Check for large owned files when projectDir is available
        if (options.projectDir) {
            const ownedFiles = stream.owns || [];
            for (const file of ownedFiles) {
                try {
                    const fullPath = path.join(options.projectDir, file);
                    const content = fs.readFileSync(fullPath, 'utf-8');
                    const lineCount = content.split('\n').length;
                    if (lineCount > 800) {
                        const warning = `${deliverable.id}: large_file_critical - ${file} is ${lineCount} lines (>800). ` +
                            `Consider splitting this stream to reduce file scope`;
                        warnings.push(warning);
                        if (options.diagnostics) {
                            options.diagnostics.warnings.push(warning);
                        }
                    }
                    else if (lineCount > 500 && ownedFiles.length >= 3) {
                        const warning = `${deliverable.id}: large_file_risk - ${file} is ${lineCount} lines, stream owns ${ownedFiles.length} files. ` +
                            `Consider splitting to reduce complexity`;
                        warnings.push(warning);
                        if (options.diagnostics) {
                            options.diagnostics.warnings.push(warning);
                        }
                    }
                }
                catch {
                    // File doesn't exist yet (will be created) — skip
                }
            }
        }
        // Warn when a deliverable has no owned files
        if (deliverable.ownedFiles.length === 0) {
            const warning = `Stream '${deliverable.id}' has no owned files. It can read but not write. ` +
                `If this is unintentional, check your Create:/Modify: syntax (must use backtick-quoted paths).`;
            warnings.push(warning);
            if (options.diagnostics) {
                options.diagnostics.warnings.push(warning);
            }
        }
        streams[deliverable.id] = stream;
    }
    // Validate and correct paths if projectDir available
    if (options.projectDir) {
        const validation = validateAndCorrectPaths(streams, options.projectDir);
        warnings.push(...validation.warnings);
        // Replace each stream with its path-corrected version in place.
        for (const [id, correctedStream] of Object.entries(validation.streams)) {
            streams[id] = correctedStream;
        }
    }
    return {
        streams,
        warnings,
        antiPatterns: validateAntiPatterns ? antiPatterns : undefined,
        count: deliverables.length,
    };
}
227
/**
 * Format generated streams as a human-readable review summary:
 * a header with the total count, any warnings, then one indented
 * section per stream definition.
 */
export function formatStreamsForReview(result) {
    const out = ['=== Generated Streams ===', '', `Total: ${result.count} streams`];
    if (result.warnings.length > 0) {
        out.push('', '--- Warnings ---');
        result.warnings.forEach(w => out.push(`⚠️ ${w}`));
    }
    out.push('', '--- Stream Definitions ---');
    const pushList = (label, items) => {
        if (items && items.length > 0) {
            out.push(`  ${label}: [${items.join(', ')}]`);
        }
    };
    for (const [id, stream] of Object.entries(result.streams)) {
        out.push('', `${id}:`, `  name: ${stream.name}`);
        pushList('deps', stream.deps);
        pushList('owns', stream.owns);
        pushList('reads', stream.reads);
        if (stream.plan) {
            // Long plans are truncated to 80 characters for the overview.
            const ellipsis = stream.plan.length > 80 ? '...' : '';
            out.push(`  plan: ${stream.plan.slice(0, 80)}${ellipsis}`);
        }
        pushList('verify', stream.verify);
        if (stream.timeoutMs) {
            out.push(`  timeoutMs: ${stream.timeoutMs}`);
        }
    }
    return out.join('\n');
}
267
/**
 * Convert generated streams to the format expected by orchex init.
 * The stream map is already in init shape, so this is a direct projection.
 */
export function toInitFormat(result) {
    const { streams } = result;
    return streams;
}
273
// ============================================================================
// Prerequisite Extraction
// ============================================================================
/**
 * Package manager install patterns.
 * Matches "npm install X", "pnpm add X", "yarn add X", "pip install X", "bun add X".
 * The single capture group holds everything after the command (the rest of the line).
 */
const INSTALL_CMD_RE = /(?:npm\s+install|pnpm\s+add|yarn\s+add|pip\s+install|bun\s+add)\s+(.+)/gi;
/**
 * Matches "Install dependencies:" or "Install packages:" followed by list items.
 * Captures the block of indented list items that follow.
 * NOTE: both regexes carry the /g flag and are therefore stateful; callers
 * must reset lastIndex before reuse (extractPrerequisites does).
 */
const INSTALL_BLOCK_RE = /install\s+(?:dependencies|packages)\s*:\s*\n((?:\s+[-*]\s+.+\n?)+)/gi;
286
/**
 * Extract a clean package name from a list item.
 * Strips the list bullet, a trailing parenthetical note, and quote/backtick
 * characters, then validates the remainder looks like a package name.
 * e.g. "- lucia" → "lucia", "- arctic (for OAuth)" → "arctic"
 * Returns null when the cleaned text is not a plausible package name.
 */
function cleanPackageName(raw) {
    const name = raw
        .replace(/^\s*[-*]\s*/, '') // leading "-" / "*" bullet
        .trim()
        .replace(/\s*\(.*?\)\s*$/, '') // trailing "(note)" annotation
        .trim()
        .replace(/[`"']/g, '') // markdown backticks and quotes
        .trim();
    return /^@?[a-z0-9][\w./-]*$/i.test(name) ? name : null;
}
300
/**
 * Extract prerequisite install commands from stream plan text.
 *
 * Scans all stream plans for:
 * 1. Explicit commands: "npm install X Y Z"
 * 2. Structured blocks: "Install dependencies:\n  - X\n  - Y"
 *
 * Returns deduplicated install commands for the main session to run
 * (a single combined `npm install …`, or an empty array when nothing matched).
 */
export function extractPrerequisites(streams) {
    const packages = new Set();
    const addCandidate = (raw) => {
        const clean = cleanPackageName(raw);
        if (clean) {
            packages.add(clean);
        }
    };
    for (const stream of Object.values(streams)) {
        const plan = stream.plan ?? '';
        // Explicit install commands ("npm install X Y Z", "pnpm add X", …).
        INSTALL_CMD_RE.lastIndex = 0;
        for (const match of plan.matchAll(INSTALL_CMD_RE)) {
            match[1]
                .split(/\s+/)
                .filter(p => p && !p.startsWith('-')) // drop CLI flags like -D
                .forEach(addCandidate);
        }
        // Structured "Install dependencies:" blocks with bulleted items.
        INSTALL_BLOCK_RE.lastIndex = 0;
        for (const match of plan.matchAll(INSTALL_BLOCK_RE)) {
            match[1].split('\n').forEach(addCandidate);
        }
    }
    return packages.size === 0 ? [] : [`npm install ${[...packages].join(' ')}`];
}
337
/**
 * Validate and optionally correct file paths in stream definitions.
 * When projectDir is available, checks that owns/reads paths exist on disk.
 * Attempts fuzzy correction for reads paths when exactly one match is found.
 *
 * Behavior summary:
 *  - without projectDir: input returned unchanged, no warnings;
 *  - owns paths: only warned about when both the file and its parent directory
 *    are missing (new files in existing directories are expected);
 *  - reads paths: paths owned by any stream are skipped (they will be created
 *    by prior waves); otherwise a missing path is corrected when exactly one
 *    basename match exists, warned about when ambiguous, and removed when no
 *    match exists at all.
 *
 * Note: returned streams always carry concrete owns/reads arrays (possibly
 * empty), even when the input left those fields undefined.
 */
export function validateAndCorrectPaths(streams, projectDir) {
    const warnings = [];
    if (!projectDir) {
        return { streams, warnings };
    }
    const corrected = {};
    // Build set of all owned paths across all streams — these are virtual files
    // that will be created by prior-wave streams, so they should NOT be fuzzy-corrected.
    const allOwnedPaths = new Set();
    for (const stream of Object.values(streams)) {
        for (const p of stream.owns || [])
            allOwnedPaths.add(p);
    }
    for (const [id, stream] of Object.entries(streams)) {
        const newOwns = [...(stream.owns || [])];
        const newReads = [...(stream.reads || [])];
        // Validate owns paths (new files are OK if parent dir exists)
        for (const filePath of newOwns) {
            const abs = path.join(projectDir, filePath);
            if (!fs.existsSync(abs)) {
                const parentDir = path.dirname(abs);
                if (!fs.existsSync(parentDir)) {
                    warnings.push(`${id}: path_not_found - owns path "${filePath}" not found and parent directory does not exist`);
                }
            }
        }
        // Validate and correct reads paths
        const indicesToRemove = [];
        for (let i = 0; i < newReads.length; i++) {
            const filePath = newReads[i];
            // Skip paths owned by any stream — they'll be created by prior waves
            if (allOwnedPaths.has(filePath))
                continue;
            const abs = path.join(projectDir, filePath);
            if (!fs.existsSync(abs)) {
                // Attempt fuzzy correction: find files with same basename
                const basename = path.basename(filePath);
                const correction = findUniqueMatch(projectDir, basename);
                if (correction.matches === 1 && correction.path) {
                    warnings.push(`${id}: path_corrected - reads path "${filePath}" corrected to "${correction.path}" (only match found)`);
                    newReads[i] = correction.path;
                }
                else if (correction.matches > 1) {
                    warnings.push(`${id}: path_ambiguous - reads path "${filePath}" not found, ${correction.matches} candidates exist`);
                }
                else {
                    // Remove phantom reads (not on disk, not owned, no fuzzy match)
                    warnings.push(`${id}: path_removed - reads path "${filePath}" not found on disk, removed from reads`);
                    indicesToRemove.push(i);
                }
            }
        }
        // Remove phantom reads (reverse order to preserve indices)
        for (let i = indicesToRemove.length - 1; i >= 0; i--) {
            newReads.splice(indicesToRemove[i], 1);
        }
        corrected[id] = {
            ...stream,
            owns: newOwns,
            reads: newReads,
        };
    }
    return { streams: corrected, warnings };
}
406
/**
 * Find files matching a basename under the conventional source directories
 * ('src', 'tests', 'lib', 'scripts') plus the project root.
 * Returns the match count, and the relative path when exactly one file matched.
 */
function findUniqueMatch(projectDir, basename) {
    const matches = [];
    for (const dir of ['src', 'tests', 'lib', 'scripts']) {
        const candidate = path.join(projectDir, dir);
        if (fs.existsSync(candidate)) {
            findFilesRecursive(candidate, basename, matches);
        }
    }
    // A root-level file counts too (recorded by its bare basename).
    const rootCandidate = path.join(projectDir, basename);
    if (fs.existsSync(rootCandidate) && fs.statSync(rootCandidate).isFile()) {
        matches.push(basename);
    }
    return matches.length === 1
        ? { matches: 1, path: matches[0] }
        : { matches: matches.length };
}
430
/**
 * Recursively collect files whose basename matches, pushing paths relative to
 * the top-level directory's parent into `results`. Entries named node_modules,
 * .git or dist are skipped; directory read errors are silently ignored.
 */
function findFilesRecursive(dir, basename, results, rootDir) {
    // On the first call the root is the parent of `dir`, so collected paths
    // come back project-relative, e.g. "src/foo/bar.ts".
    const root = rootDir || path.dirname(dir);
    const SKIP = ['node_modules', '.git', 'dist'];
    let entries;
    try {
        entries = fs.readdirSync(dir, { withFileTypes: true });
    }
    catch {
        // Permission errors, etc. — skip directory
        return;
    }
    for (const entry of entries) {
        if (SKIP.includes(entry.name)) {
            continue;
        }
        const entryPath = path.join(dir, entry.name);
        if (entry.isDirectory()) {
            findFilesRecursive(entryPath, basename, results, root);
        }
        else if (entry.name === basename) {
            results.push(path.relative(root, entryPath));
        }
    }
}
@@ -0,0 +1,34 @@
1
/**
 * Execution Logger
 *
 * Append-only file logger for execution visibility.
 * Writes to .orchex/active/execution.log
 * All writes are best-effort: logging failures never propagate to callers.
 */
export type LogLevel = 'INFO' | 'WARN' | 'ERROR';
export interface LogEntry {
    level: LogLevel;
    event: string;
    data?: Record<string, unknown>;
}
/**
 * Create a logger for a specific project directory.
 */
export declare function createLogger(projectDir: string): {
    info: (event: string, data?: Record<string, unknown>) => Promise<void>;
    warn: (event: string, data?: Record<string, unknown>) => Promise<void>;
    error: (event: string, data?: Record<string, unknown>) => Promise<void>;
    /**
     * Set the runId for log correlation. Once set, all subsequent log lines include it.
     */
    setRunId(id: string): void;
    /**
     * Start a heartbeat that logs progress every interval (default 10s).
     * Returns a stop function.
     */
    startHeartbeat(streamId: string, timeoutMs: number, intervalMs?: number): () => void;
};
export type Logger = ReturnType<typeof createLogger>;
/**
 * Clear the execution log (called at start of new execution).
 */
export declare function clearExecutionLog(projectDir: string): Promise<void>;
package/dist/logger.js ADDED
@@ -0,0 +1,83 @@
1
+ /**
2
+ * Execution Logger
3
+ *
4
+ * Append-only file logger for execution visibility.
5
+ * Writes to .orchex/active/execution.log
6
+ */
7
+ import * as fs from 'fs/promises';
8
+ import * as path from 'path';
9
// Log location, relative to the project directory.
const LOG_FILE = '.orchex/active/execution.log';
/**
 * Format a single log line: ISO timestamp, level, event, optional runId,
 * then space-separated key=value pairs from `data`.
 * Non-primitive values are JSON-serialized; previously objects/arrays were
 * stringified via template interpolation and printed as "[object Object]".
 */
function formatLogLine(level, event, data, runId) {
    const timestamp = new Date().toISOString();
    const runIdStr = runId ? ` runId=${runId}` : '';
    const dataStr = data
        ? ' ' + Object.entries(data)
            .map(([k, v]) => `${k}=${typeof v === 'object' && v !== null ? JSON.stringify(v) : v}`)
            .join(' ')
        : '';
    return `${timestamp} [${level}] ${event}${runIdStr}${dataStr}\n`;
}
21
/**
 * Create a logger for a specific project directory.
 *
 * All writes are best-effort appends to .orchex/active/execution.log;
 * logging failures are swallowed and never interrupt execution.
 */
export function createLogger(projectDir) {
    const logPath = path.join(projectDir, LOG_FILE);
    let runId;
    const write = async (level, event, data) => {
        try {
            // The .orchex/active directory may not exist yet on the first write.
            await fs.mkdir(path.dirname(logPath), { recursive: true });
            await fs.appendFile(logPath, formatLogLine(level, event, data, runId));
        }
        catch {
            // Best effort - don't fail execution for logging issues
        }
    };
    return {
        info: (event, data) => write('INFO', event, data),
        warn: (event, data) => write('WARN', event, data),
        error: (event, data) => write('ERROR', event, data),
        /**
         * Set the runId for log correlation. Once set, all subsequent log
         * lines include it.
         */
        setRunId(id) {
            runId = id;
        },
        /**
         * Start a heartbeat that logs progress every interval (default 10s).
         * Escalates to WARN when less than 15s remain before the timeout.
         * Returns a stop function.
         */
        startHeartbeat(streamId, timeoutMs, intervalMs = 10000) {
            const startedAt = Date.now();
            const timer = setInterval(() => {
                const elapsedMs = Date.now() - startedAt;
                const remainingMs = Math.max(0, timeoutMs - elapsedMs);
                // Fire-and-forget: heartbeat logging is best-effort by design.
                void write(remainingMs < 15000 ? 'WARN' : 'INFO', 'api_call_waiting', {
                    id: streamId,
                    elapsed: `${Math.round(elapsedMs / 1000)}s`,
                    remaining: `${Math.round(remainingMs / 1000)}s`,
                });
            }, intervalMs);
            return () => clearInterval(timer);
        },
    };
}
72
/**
 * Clear the execution log (called at start of new execution).
 * Truncates the log file; any write failure is silently ignored.
 */
export async function clearExecutionLog(projectDir) {
    try {
        await fs.writeFile(path.join(projectDir, LOG_FILE), '');
    }
    catch {
        // Best effort — e.g. the parent directory may not exist yet.
    }
}
@@ -0,0 +1,5 @@
1
/**
 * Pino-based structured loggers: a module-level root logger plus factories
 * for per-module and per-request child loggers.
 */
import pino from 'pino';
/** Root application logger. */
export declare const logger: pino.Logger<never, boolean>;
/** Logger scoped to a named module. */
export declare function createLogger(module: string): pino.Logger<never, boolean>;
/** Logger scoped to a request id, optionally tagged with a user id. */
export declare function createRequestLogger(requestId: string, userId?: string): pino.Logger<never, boolean>;
/** NOTE(review): presumably a final/teardown log hook (error + event name) — confirm against logging.js. */
export declare function finalLogger(err: Error | null, evt: string): void;