@hyperdrive.bot/bmad-workflow 1.0.9 → 1.0.11
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/commands/decompose.js +2 -1
- package/dist/models/provider.js +1 -1
- package/dist/services/agents/opencode-agent-runner.js +58 -40
- package/dist/services/orchestration/dependency-graph-executor.d.ts +2 -1
- package/dist/services/orchestration/dependency-graph-executor.js +4 -1
- package/dist/services/orchestration/task-decomposition-service.js +2 -0
- package/dist/types/task-graph.d.ts +2 -0
- package/package.json +1 -1
package/dist/commands/decompose.js
CHANGED

@@ -147,6 +147,7 @@ export default class Decompose extends Command {
            filePattern: flags['file-pattern'],
            goal: args.goal,
            maxParallel: flags['max-parallel'],
+            model: flags.model,
            perFile: flags['per-file'],
            planOnly: flags['plan-only'],
            storyFormat: flags['story-format'],
@@ -202,7 +203,7 @@ export default class Decompose extends Command {
            }
        }
        this.log(colors.bold('\n⚙️ Phase 3: Executing task graph...\n'));
-        const executor = new DependencyGraphExecutor(taskGraph, createAgentRunner(flags.provider, this.logger), new BatchProcessor(flags['max-parallel'], 0, this.logger), this.fileManager, this.logger, flags.cwd);
+        const executor = new DependencyGraphExecutor(taskGraph, createAgentRunner(flags.provider, this.logger), new BatchProcessor(flags['max-parallel'], 0, this.logger), this.fileManager, this.logger, flags.cwd, flags.model);
        const executionResult = await executor.execute((layerIndex, totalLayers, layerSize) => {
            this.log(colors.info(`\n🔄 Starting Layer ${layerIndex + 1}/${totalLayers} (${layerSize} task${layerSize > 1 ? 's' : ''} in parallel)`));
        }, (taskId, _layerIndex, _taskIndex, _totalTasks) => {
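A hedged usage sketch of the new flag: the executable name is inferred from the package name and the model id is only an example; neither is confirmed by this diff. The value of --model is copied into DecomposeOptions.model and passed as the executor's new last constructor argument.

    npx @hyperdrive.bot/bmad-workflow decompose "Add rate limiting" --provider opencode --model anthropic/claude-sonnet-4 --max-parallel 3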
package/dist/models/provider.js
CHANGED

@@ -18,7 +18,7 @@ export const PROVIDER_CONFIGS = {
        supportsFileReferences: false,
    },
    opencode: {
-        command: 'opencode',
+        command: process.env.HOME + '/.opencode/bin/opencode',
        flags: ['run', '--format', 'json'],
        modelFlag: '--model',
        supportsFileReferences: true,
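A small sketch of how the new hard-coded command path resolves; the home directory is illustrative, and the guarded fallback shown is not part of the package:

    import os from 'node:os';
    // With HOME=/home/alice the shipped value is '/home/alice/.opencode/bin/opencode'.
    // Plain string concatenation yields 'undefined/.opencode/bin/opencode' when HOME is unset,
    // so a guarded variant (illustrative only) could look like:
    const opencodeCommand = `${process.env.HOME ?? os.homedir()}/.opencode/bin/opencode`;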
package/dist/services/agents/opencode-agent-runner.js
CHANGED

@@ -155,11 +155,11 @@ export class OpenCodeAgentRunner {
            tempDir = await mkdtemp(join(tmpdir(), 'opencode-prompt-'));
            tempFile = join(tempDir, 'prompt.txt');
            await writeFile(tempFile, prompt, 'utf8');
-            // Build command
-            // Format: opencode run --format json [--model provider/model]
+            // Build command - use stdin to pass the prompt (handles newlines better)
+            // Format: cat tempfile | opencode run --format json [--model provider/model] -
            const flags = this.config.flags.join(' ');
            const modelArg = options.model ? `${this.config.modelFlag} ${options.model}` : '';
-            const command =
+            const command = `cat "${tempFile}" | ${this.config.command} ${flags} ${modelArg} -`.replace(/\s+/g, ' ').trim();
            // Log the command being executed
            this.logger.info({
                command,
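A worked example of the command string the new template produces; the temp file path, home directory, and model id below are illustrative stand-ins:

    // Stand-ins for this.config.command, this.config.flags.join(' '), and options.model:
    const tempFile = '/tmp/opencode-prompt-abc123/prompt.txt';
    const commandPath = '/home/alice/.opencode/bin/opencode';
    const flags = 'run --format json';
    const modelArg = '--model anthropic/claude-sonnet-4';
    const command = `cat "${tempFile}" | ${commandPath} ${flags} ${modelArg} -`.replace(/\s+/g, ' ').trim();
    // -> cat "/tmp/opencode-prompt-abc123/prompt.txt" | /home/alice/.opencode/bin/opencode run --format json --model anthropic/claude-sonnet-4 -
    // With no model selected, modelArg is '' and the whitespace collapse keeps the command single-spaced.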
@@ -264,47 +264,65 @@ export class OpenCodeAgentRunner {
        if (!stdoutData || stdoutData.trim().length === 0) {
            return '';
        }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                return String(parsed.response);
-            }
-            if (parsed.output) {
-                return String(parsed.output);
-            }
-            if (parsed.text) {
-                return String(parsed.text);
-            }
-            // If we have a result array, try to extract content
-            if (Array.isArray(parsed)) {
-                const messages = parsed
-                    .filter((item) => item.type === 'message' || item.content || item.text)
-                    .map((item) => item.content || item.text || item.message || '')
-                    .filter(Boolean);
-                if (messages.length > 0) {
-                    return messages.join('\n');
+        // Open Code outputs NDJSON (newline-delimited JSON)
+        // Each line is a separate JSON object: step_start, text, step_finish
+        // We need to extract the "text" from entries with type="text"
+        const lines = stdoutData.trim().split('\n');
+        const textParts = [];
+        for (const line of lines) {
+            if (!line.trim())
+                continue;
+            try {
+                const parsed = JSON.parse(line);
+                // Handle JSON string (like "Just a string")
+                if (typeof parsed === 'string') {
+                    textParts.push(parsed);
+                    continue;
                }
+                // Handle JSON array
+                if (Array.isArray(parsed)) {
+                    for (const item of parsed) {
+                        if (item.content)
+                            textParts.push(item.content);
+                        else if (item.text)
+                            textParts.push(item.text);
+                        else if (item.message)
+                            textParts.push(item.message);
+                    }
+                    continue;
+                }
+                // Extract text from "text" type entries (Open Code NDJSON format)
+                if (parsed.type === 'text' && parsed.part?.text) {
+                    textParts.push(parsed.part.text);
+                }
+                else if (parsed.text) {
+                    textParts.push(parsed.text);
+                }
+                else if (parsed.content) {
+                    textParts.push(parsed.content);
+                }
+                else if (parsed.message) {
+                    textParts.push(parsed.message);
+                }
+                else if (parsed.response) {
+                    textParts.push(parsed.response);
+                }
+                else if (parsed.output) {
+                    textParts.push(parsed.output);
+                }
+            }
+            catch {
+                // If line isn't valid JSON, it might be plain text output
+                this.logger.debug({ lineLength: line.length }, 'Line is not JSON, treating as plain text');
+                textParts.push(line);
            }
-            // Fallback: stringify the parsed object
-            this.logger.debug({ parsedKeys: Object.keys(parsed) }, 'Unknown JSON structure, returning stringified');
-            return JSON.stringify(parsed, null, 2);
        }
-
-
-            this.logger.debug({ stdoutLength: stdoutData.length }, 'JSON parsing failed, returning raw output');
-            return stdoutData;
+        if (textParts.length > 0) {
+            return textParts.join('\n');
        }
+        // Fallback: return raw output
+        this.logger.debug({ stdoutLength: stdoutData.length }, 'No text extracted, returning raw output');
+        return stdoutData;
    }
    /**
     * Build detailed error context for debugging process failures
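A hedged sketch of the NDJSON stdout shape the new parser expects; only the structure follows the comments in the hunk above, the values are invented:

    const stdoutData = [
        '{"type":"step_start","part":{}}',
        '{"type":"text","part":{"text":"First chunk of the answer."}}',
        '{"type":"text","part":{"text":"Second chunk."}}',
        '{"type":"step_finish","part":{}}',
    ].join('\n');
    // For this input the loop collects the two "text" parts and returns
    // 'First chunk of the answer.\nSecond chunk.'; the step_start/step_finish
    // entries match no branch and are skipped.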
package/dist/services/orchestration/dependency-graph-executor.d.ts
CHANGED

@@ -27,8 +27,9 @@ export declare class DependencyGraphExecutor {
    private readonly cwd?;
    private readonly fileManager;
    private readonly logger;
+    private readonly model?;
    private readonly taskGraph;
-    constructor(taskGraph: TaskGraph, agentRunner: AIProviderRunner, batchProcessor: BatchProcessor, fileManager: FileManager, logger: pino.Logger, cwd?: string);
+    constructor(taskGraph: TaskGraph, agentRunner: AIProviderRunner, batchProcessor: BatchProcessor, fileManager: FileManager, logger: pino.Logger, cwd?: string, model?: string);
    /**
     * Execute the entire task graph
     *
package/dist/services/orchestration/dependency-graph-executor.js
CHANGED

@@ -14,15 +14,17 @@ export class DependencyGraphExecutor {
    cwd;
    fileManager;
    logger;
+    model;
    taskGraph;
    // eslint-disable-next-line max-params -- Constructor dependencies will be refactored to config object in future
-    constructor(taskGraph, agentRunner, batchProcessor, fileManager, logger, cwd) {
+    constructor(taskGraph, agentRunner, batchProcessor, fileManager, logger, cwd, model) {
        this.taskGraph = taskGraph;
        this.agentRunner = agentRunner;
        this.batchProcessor = batchProcessor;
        this.fileManager = fileManager;
        this.logger = logger;
        this.cwd = cwd;
+        this.model = model;
    }
    /**
     * Execute the entire task graph
@@ -306,6 +308,7 @@ Use the file at the path above to document:
        // Execute agent
        const result = await this.agentRunner.runAgent(fullPrompt, {
            agentType,
+            model: this.model,
            references: task.targetFiles, // Pass target files as references
            timeout: task.estimatedMinutes * 60 * 1000 * 1.5, // 1.5x estimated time as buffer
        });
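A brief sketch of the plumbing this enables; the constructor arguments other than the model string are assumed to be built as in the decompose command above, and the model id is illustrative:

    const executor = new DependencyGraphExecutor(taskGraph, agentRunner, batchProcessor, fileManager, logger, process.cwd(), 'anthropic/claude-sonnet-4');
    // Every runAgent call during execution now receives { agentType, model: this.model, references, timeout },
    // and OpenCodeAgentRunner only appends --model when options.model is truthy (modelArg falls back to '').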
package/dist/services/orchestration/task-decomposition-service.js
CHANGED

@@ -41,6 +41,7 @@ export class TaskDecompositionService {
        // Execute Claude agent to generate task graph
        const result = await this.agentRunner.runAgent(prompt, {
            agentType: (options.agent ?? 'architect'),
+            model: options.model,
            references: options.contextFiles,
            timeout: options.taskTimeout ?? 600_000, // 10 min default for planning
        });
@@ -506,6 +507,7 @@ YOUR RESPONSE MUST START WITH "masterPrompt: |" - NO OTHER TEXT ALLOWED!`;
        this.logger.info('Asking Claude to fix YAML errors');
        const fixResult = await this.agentRunner.runAgent(fixPrompt, {
            agentType: 'architect',
+            model: options.model,
            timeout: 60_000, // 1 minute for fix
        });
        if (!fixResult.success) {
package/dist/types/task-graph.d.ts
CHANGED

@@ -119,6 +119,8 @@ export interface DecomposeOptions {
    goal: string;
    /** Maximum number of tasks to run in parallel */
    maxParallel?: number;
+    /** Model to use for AI provider */
+    model?: string;
    /** Enable per-file task generation for file-heavy operations */
    perFile?: boolean;
    /** Plan only without executing */
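A minimal sketch of a DecomposeOptions value using the new field; only members visible in this hunk are shown and the strings are illustrative:

    const options = {
        goal: 'Refactor the logging layer',
        maxParallel: 3,
        model: 'anthropic/claude-sonnet-4', // optional; omit it to fall back to the provider's default model
        perFile: false,
    };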
package/package.json
CHANGED