opencode-orchestrator-plugin 1.0.0-beta.10 → 1.0.0-beta.12
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.js +27 -63
- package/dist/tools/commands.test.js +32 -35
- package/dist/utils/autogenerateFlow.d.ts +28 -0
- package/dist/utils/autogenerateFlow.js +176 -3
- package/dist/utils/autogenerateFlow.test.js +277 -4
- package/dist/utils/configDetection.d.ts +1 -0
- package/dist/utils/configDetection.js +22 -8
- package/dist/utils/configDetection.test.js +49 -4
- package/dist/utils/contextAnalysis.test.js +15 -17
- package/package.json +1 -1
package/dist/index.js
CHANGED

@@ -6,73 +6,34 @@ import RevertPromptJson from "./prompts/orchestrator/revert.json" with { type: "json" };
 import SetupPromptJson from "./prompts/orchestrator/setup.json" with { type: "json" };
 import StatusPromptJson from "./prompts/orchestrator/status.json" with { type: "json" };
 import { detectOrchestratorConfig } from "./utils/configDetection.js";
-import { buildIgnoreMatcher } from "./utils/ignoreMatcher.js";
 const asPrompt = (prompt) => (typeof prompt === "string" ? prompt : "");
 const asDescription = (description) => typeof description === "string" ? description : undefined;
 export const MyPlugin = async ({ directory, }) => {
     const orchestratorPath = path.join(directory, "orchestrator");
     let fileHeirarchy = "";
-    const
-
-
-
-
-
-
-
-
-
-
-
-
-
-        gitignore: readIgnoreFile(gitIgnoreFile),
-        ignore: readIgnoreFile(ignoreFile),
-        geminiignore: readIgnoreFile(geminiIgnoreFile),
-    });
-    const directories = new Set();
-    const queue = [rootDir];
-    while (queue.length > 0) {
-        const current = queue.shift();
-        if (!current)
-            continue;
-        const entries = fs.readdirSync(current, { withFileTypes: true });
-        for (const entry of entries) {
-            const entryPath = path.join(current, entry.name);
-            const relativePath = path.relative(rootDir, entryPath) || ".";
-            if (entry.isDirectory()) {
-                const normalizedDir = relativePath === "." ? "" : relativePath;
-                if (!ig.shouldTraverse(normalizedDir)) {
-                    continue;
-                }
-                if (!ig.ignores(normalizedDir)) {
-                    directories.add(relativePath || ".");
-                }
-                queue.push(entryPath);
-            }
-            else if (ig.ignores(relativePath)) {
-                continue;
+    const getFilesRecursively = (dir) => {
+        let results = [];
+        if (!fs.existsSync(dir))
+            return results;
+        const list = fs.readdirSync(dir);
+        list.forEach((file) => {
+            const filePath = path.join(dir, file);
+            const stat = fs.statSync(filePath);
+            if (stat && stat.isDirectory()) {
+                results = results.concat(getFilesRecursively(filePath));
+            }
+            else {
+                if (filePath.endsWith(".json") || filePath.endsWith(".md")) {
+                    results.push(filePath);
             }
         }
-    }
-
-    let summary = "";
-    let count = 0;
-    for (const dir of sorted) {
-        if (summary.length >= maxChars || count >= maxEntries)
-            break;
-        summary += `${dir}\n`;
-        count += 1;
-    }
-    if (summary.length >= maxChars || count >= maxEntries) {
-        summary += "...\n";
-    }
-    return summary.trim();
+        });
+        return results;
     };
     if (fs.existsSync(orchestratorPath)) {
-
-
-            .map((
+        const files = getFilesRecursively(orchestratorPath);
+        fileHeirarchy = files
+            .map((f) => path.relative(directory, f))
             .join("\n ");
     }
     const isOrchestratorSetup = () => {
@@ -83,10 +44,13 @@ export const MyPlugin = async ({ directory, }) => {
     const configDetection = detectOrchestratorConfig();
     return {
         config: async (_config) => {
+            const commandDefaults = configDetection.orchestratorModel
+                ? { agent: "orchestrator", model: configDetection.orchestratorModel }
+                : { agent: "orchestrator" };
             _config.command = {
                 ..._config.command,
                 "orchestrator:implement": {
-
+                    ...commandDefaults,
                     template: asPrompt(ImplementPromptJson.prompt) + `
 Environment Details:
 - Directory: ${directory}
@@ -99,17 +63,17 @@ export const MyPlugin = async ({ directory, }) => {
                     description: asDescription(ImplementPromptJson.description),
                 },
                 "orchestrator:newTrack": {
-
+                    ...commandDefaults,
                     template: asPrompt(NewTrackPromptJson.prompt),
                     description: asDescription(NewTrackPromptJson.description),
                 },
                 "orchestrator:revert": {
-
+                    ...commandDefaults,
                     template: asPrompt(RevertPromptJson.prompt),
                     description: asDescription(RevertPromptJson.description),
                 },
                 "orchestrator:setup": {
-
+                    ...commandDefaults,
                     template: asPrompt(SetupPromptJson.prompt) + `
 Environment Details:
 - Directory: ${directory}
@@ -124,7 +88,7 @@ export const MyPlugin = async ({ directory, }) => {
                     description: asDescription(SetupPromptJson.description),
                 },
                 "orchestrator:status": {
-
+                    ...commandDefaults,
                     template: asPrompt(StatusPromptJson.prompt) + `
 
 
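In beta.12 every orchestrator command is built from a shared commandDefaults object, so a model picked up by detectOrchestratorConfig() propagates to all five commands. A minimal sketch of that behavior, assuming an illustrative detection result (the real value comes from the user's config files):

// Hedged sketch of the commandDefaults pattern used above; the detection result is example data.
const configDetection = { orchestratorModel: "anthropic/claude-3-5-haiku" };
const commandDefaults = configDetection.orchestratorModel
    ? { agent: "orchestrator", model: configDetection.orchestratorModel }
    : { agent: "orchestrator" };
const command = {
    "orchestrator:status": {
        ...commandDefaults, // agent plus model when a model was detected
        template: "status prompt goes here", // placeholder for the real prompt text
    },
};
console.log(command["orchestrator:status"].model); // "anthropic/claude-3-5-haiku"

If no model is configured, the spread only sets agent: "orchestrator" and no model field is added to the command.
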
package/dist/tools/commands.test.js
CHANGED

@@ -18,13 +18,10 @@ describe("Command Tools", () => {
         sessionID: "test-session",
         messageID: "test-message",
     };
-
-
-
-
-Test prompt content
-"""
-`);
+        vi.mocked(readFile).mockResolvedValue(JSON.stringify({
+            description: "Test command",
+            prompt: "Test prompt content"
+        }));
     });
     describe("createSetupTool", () => {
         it("should create a tool with correct description", () => {
@@ -32,10 +29,10 @@ Test prompt content
             expect(tool.description).toBe("Directives lookup tool for scaffolding the project and setting up the Orchestrator environment");
         });
         it("should return directives JSON string when executed", async () => {
-            vi.mocked(readFile).mockResolvedValue(
-description
-prompt
-
+            vi.mocked(readFile).mockResolvedValue(JSON.stringify({
+                description: "Setup",
+                prompt: "Setup Prompt"
+            }));
             const tool = createSetupTool(mockCtx);
             const result = await tool.execute({}, mockToolContext);
             expect(JSON.parse(result)).toEqual({ directives: "Setup Prompt" });
@@ -51,10 +48,10 @@ prompt = """Setup Prompt"""
             expect(tool.args).toHaveProperty("description");
         });
         it("should replace description in directives", async () => {
-            vi.mocked(readFile).mockResolvedValue(
-description
-prompt
-
+            vi.mocked(readFile).mockResolvedValue(JSON.stringify({
+                description: "New Track",
+                prompt: "Track description: {{args}}"
+            }));
             const tool = createNewTrackTool(mockCtx);
             const result = await tool.execute({ description: "Login feature" }, mockToolContext);
             expect(JSON.parse(result)).toEqual({ directives: "Track description: Login feature" });
@@ -70,10 +67,10 @@ prompt = """Track description: {{args}}"""
             expect(tool.args).toHaveProperty("track_name");
         });
         it("should replace track_name in directives", async () => {
-            vi.mocked(readFile).mockResolvedValue(
-description
-prompt
-
+            vi.mocked(readFile).mockResolvedValue(JSON.stringify({
+                description: "Implement",
+                prompt: "Track: {{track_name}}"
+            }));
             const tool = createImplementTool(mockCtx);
             const result = await tool.execute({ track_name: "auth-track" }, mockToolContext);
             expect(JSON.parse(result)).toEqual({ directives: "Track: auth-track" });
@@ -83,10 +80,10 @@ prompt = """Track: {{track_name}}"""
                 if (typeof path === 'string' && path.endsWith("manual.md")) {
                     return "Manual Strategy";
                 }
-                return
-description
-prompt
-
+                return JSON.stringify({
+                    description: "Implement",
+                    prompt: "Strategy: {{strategy_section}}"
+                });
             });
             const tool = createImplementTool(mockCtx);
             const result = await tool.execute({}, mockToolContext);
@@ -99,10 +96,10 @@ prompt = """Strategy: {{strategy_section}}"""
             expect(tool.description).toBe("Directives lookup tool for displaying the current progress of the project");
         });
         it("should execute and return directives", async () => {
-            vi.mocked(readFile).mockResolvedValue(
-description
-prompt
-
+            vi.mocked(readFile).mockResolvedValue(JSON.stringify({
+                description: "Status",
+                prompt: "Status Prompt"
+            }));
             const tool = createStatusTool(mockCtx);
             const result = await tool.execute({}, mockToolContext);
             expect(JSON.parse(result)).toEqual({ directives: "Status Prompt" });
@@ -114,10 +111,10 @@ prompt = """Status Prompt"""
             expect(tool.description).toBe("Directives lookup tool for reverting previous work");
         });
         it("should replace target in directives", async () => {
-            vi.mocked(readFile).mockResolvedValue(
-description
-prompt
-
+            vi.mocked(readFile).mockResolvedValue(JSON.stringify({
+                description: "Revert",
+                prompt: "Target: {{target}}"
+            }));
             const tool = createRevertTool(mockCtx);
             const result = await tool.execute({ target: "track 1" }, mockToolContext);
             expect(JSON.parse(result)).toEqual({ directives: "Target: track 1" });
@@ -133,10 +130,10 @@ prompt = """Target: {{target}}"""
         });
     describe("Prompt Replacement", () => {
         it("should replace standard variables in directives", async () => {
-            vi.mocked(readFile).mockResolvedValue(
-description
-prompt
-
+            vi.mocked(readFile).mockResolvedValue(JSON.stringify({
+                description: "Test",
+                prompt: "Templates: {{templatesDir}}"
+            }));
             const tool = createNewTrackTool(mockCtx);
             const result = await tool.execute({}, mockToolContext);
             expect(JSON.parse(result).directives).toContain("Templates:");
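The mocks above switch from the old TOML-style fixture strings to JSON.stringify(...), matching the JSON prompt imports in dist/index.js. A hedged sketch of how such a prompt file would be read and parsed by the tools (the path below is illustrative, not a confirmed location):

import { readFile } from "node:fs/promises";
// Assumed shape: each prompt file is JSON with "description" and "prompt" fields,
// which is what the updated mocks return; the file path is an assumption.
const raw = await readFile("dist/prompts/orchestrator/status.json", "utf-8");
const { description, prompt } = JSON.parse(raw);
console.log(description, prompt);
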
package/dist/utils/autogenerateFlow.d.ts
CHANGED

@@ -1,7 +1,9 @@
 import { ProjectContext } from './contextAnalysis.js';
 export type SectionType = 'product_guide' | 'product_guidelines' | 'tech_stack' | 'workflow';
 export type UserChoice = 'A' | 'B' | 'C';
+export type FallbackChoice = 'A' | 'B' | 'C';
 export type FlowStatus = 'generating' | 'presented' | 'accepted' | 'edited' | 'regenerating';
+export type FallbackAction = 'manual' | 'regenerate' | 'accept_partial';
 export interface AutogenerationState {
     questionId: string;
     attemptNumber: number;
@@ -17,15 +19,38 @@ export interface FlowResult {
     content?: string;
     error?: string;
     state?: AutogenerationState;
+    failureSummary?: string;
+    fallbackPrompt?: string;
+    fallbackAction?: FallbackAction;
+    fallbackState?: ContextSufficiencyResult;
+    ambiguityState?: AmbiguityResult;
 }
 export interface ValidationResult {
     valid: boolean;
     error?: string;
 }
+export interface ContextSufficiencyResult {
+    sufficient: boolean;
+    reason: string;
+    missing: string[];
+}
+export interface AmbiguityResult {
+    ambiguous: boolean;
+    reason: string;
+    signals: string[];
+}
 export declare const MAX_REGENERATION_ATTEMPTS = 3;
+export declare const MIN_CONTEXT_SIGNALS = 2;
+export declare const MIN_CONFIDENCE_THRESHOLD = 0.4;
 export declare function createInitialState(questionId: string): AutogenerationState;
 export declare function generateContentForSection(sectionType: SectionType, context: ProjectContext): FlowResult;
 export declare function validateContent(content: string, sectionType: SectionType): ValidationResult;
+export declare function evaluateContextSufficiency(context: ProjectContext): ContextSufficiencyResult;
+export declare function formatAutogenerationFailure(sectionType: SectionType, error: string, sufficiency: ContextSufficiencyResult, ambiguity: AmbiguityResult): string;
+export declare function evaluateAmbiguity(context: ProjectContext): AmbiguityResult;
+export declare function formatFallbackPrompt(reason: string, missing: string[]): string;
+export declare function parseFallbackChoice(input: string): FallbackChoice | null;
+export declare function resolveFallbackAction(choice: FallbackChoice | null): FallbackAction;
 export declare function handleAccept(state: AutogenerationState, sectionType: SectionType): FlowResult;
 export declare function handleEdit(state: AutogenerationState, editedContent: string, sectionType: SectionType): FlowResult;
 export declare function handleRegenerate(state: AutogenerationState, guidance: string, sectionType: SectionType, context: ProjectContext): FlowResult;
@@ -35,3 +60,6 @@ export declare function formatRegeneratePrompt(): string;
 export declare function formatMaxAttemptsPrompt(): string;
 export declare function parseUserChoice(input: string): UserChoice | null;
 export declare function isCancel(input: string): boolean;
+export declare function handleFallbackManual(sectionType: SectionType, reason: string): FlowResult;
+export declare function handleFallbackAcceptPartial(partialContent: string, sectionType: SectionType, reason: string): FlowResult;
+export declare function handleFallbackRegenerate(sectionType: SectionType, context: ProjectContext, additionalContext: string): FlowResult;
package/dist/utils/autogenerateFlow.js
CHANGED

@@ -1,5 +1,7 @@
 import { generateProductGuide, generateProductGuidelines, generateTechStack, generateWorkflow, } from './contentGeneration.js';
 export const MAX_REGENERATION_ATTEMPTS = 3;
+export const MIN_CONTEXT_SIGNALS = 2;
+export const MIN_CONFIDENCE_THRESHOLD = 0.4;
 export function createInitialState(questionId) {
     return {
         questionId,
@@ -31,10 +33,20 @@ export function generateContentForSection(sectionType, context) {
         default:
             return { success: false, error: `Unknown section type: ${sectionType}` };
     }
-    if (generationResult.confidence <
+    if (generationResult.confidence < MIN_CONFIDENCE_THRESHOLD) {
+        const sufficiency = evaluateContextSufficiency(context);
+        const ambiguity = evaluateAmbiguity(context);
+        const error = 'Insufficient context for reliable autogeneration';
+        const failureSummary = formatAutogenerationFailure(sectionType, error, sufficiency, ambiguity);
+        const fallbackPrompt = formatFallbackPrompt(error, sufficiency.missing);
         return {
             success: false,
-            error
+            error,
+            failureSummary,
+            fallbackPrompt,
+            fallbackAction: resolveFallbackAction(null),
+            fallbackState: sufficiency,
+            ambiguityState: ambiguity,
         };
     }
     return {
@@ -43,9 +55,19 @@ export function generateContentForSection(sectionType, context) {
         };
     }
     catch (error) {
+        const message = error instanceof Error ? error.message : 'Unknown error during generation';
+        const sufficiency = evaluateContextSufficiency(context);
+        const ambiguity = evaluateAmbiguity(context);
+        const failureSummary = formatAutogenerationFailure(sectionType, message, sufficiency, ambiguity);
+        const fallbackPrompt = formatFallbackPrompt(message, sufficiency.missing);
         return {
             success: false,
-            error:
+            error: message,
+            failureSummary,
+            fallbackPrompt,
+            fallbackAction: resolveFallbackAction(null),
+            fallbackState: sufficiency,
+            ambiguityState: ambiguity,
         };
     }
 }
@@ -77,6 +99,132 @@ export function validateContent(content, sectionType) {
     }
     return { valid: true };
 }
+export function evaluateContextSufficiency(context) {
+    const missing = [];
+    const signals = [];
+    const hasManifests = context.raw.manifests.length > 0;
+    const hasDocs = context.raw.docs.length > 0;
+    const hasGit = context.raw.git.commitCount > 0;
+    const hasStructure = context.raw.structure.structure.length > 0;
+    const hasIgnores = context.raw.ignores.patterns.length > 0;
+    const hasCicd = context.raw.cicd.length > 0;
+    if (!hasManifests)
+        missing.push('manifest files');
+    if (!hasDocs)
+        missing.push('documentation');
+    if (!hasGit)
+        missing.push('git history');
+    if (!hasStructure)
+        missing.push('source structure');
+    if (!hasIgnores)
+        missing.push('ignore patterns');
+    if (!hasCicd)
+        missing.push('CI/CD configuration');
+    if (hasManifests)
+        signals.push('manifests');
+    if (hasDocs)
+        signals.push('docs');
+    if (hasGit)
+        signals.push('git');
+    if (hasStructure)
+        signals.push('structure');
+    if (hasIgnores)
+        signals.push('ignores');
+    if (hasCicd)
+        signals.push('cicd');
+    if (signals.length >= MIN_CONTEXT_SIGNALS) {
+        return {
+            sufficient: true,
+            reason: `Detected ${signals.length} context sources: ${signals.join(', ')}`,
+            missing,
+        };
+    }
+    return {
+        sufficient: false,
+        reason: 'Not enough project context sources to autogenerate reliably',
+        missing,
+    };
+}
+export function formatAutogenerationFailure(sectionType, error, sufficiency, ambiguity) {
+    const missingList = sufficiency.missing.length > 0
+        ? `Missing: ${sufficiency.missing.join(', ')}`
+        : 'Missing: none detected';
+    const ambiguityList = ambiguity.signals.length > 0
+        ? `Ambiguity signals: ${ambiguity.signals.join('; ')}`
+        : 'Ambiguity signals: none';
+    return `Autogeneration failed for ${sectionType}.
+Reason: ${error}
+${sufficiency.reason}
+${missingList}
+${ambiguityList}`;
+}
+export function evaluateAmbiguity(context) {
+    const signals = [];
+    const warnings = context.meta.warnings;
+    if (warnings.some((warning) => warning.toLowerCase().includes('no manifest'))) {
+        signals.push('Missing manifest files');
+    }
+    if (warnings.some((warning) => warning.toLowerCase().includes('no readme'))) {
+        signals.push('Missing README documentation');
+    }
+    if (context.insights.projectType === 'unknown') {
+        signals.push('Unknown project type');
+    }
+    if (context.insights.techStack.languages.length === 0) {
+        signals.push('No dominant language detected');
+    }
+    if (context.meta.filesAnalyzed === 0) {
+        signals.push('No files analyzed');
+    }
+    if (signals.length === 0) {
+        return { ambiguous: false, reason: 'Context appears coherent', signals };
+    }
+    return {
+        ambiguous: true,
+        reason: 'Project context appears ambiguous or incomplete',
+        signals,
+    };
+}
+export function formatFallbackPrompt(reason, missing) {
+    const missingList = missing.length > 0
+        ? `Missing: ${missing.join(', ')}`
+        : 'No specific missing data identified.';
+    return `Autogeneration could not proceed. ${reason}
+
+${missingList}
+
+Please choose how to continue:
+A) Continue with manual Q&A
+B) Provide more context for regeneration
+C) Accept partial results (use what was generated)
+
+Please enter A, B, or C:`;
+}
+export function parseFallbackChoice(input) {
+    const normalized = input.trim().toUpperCase();
+    if (['A', 'MANUAL', 'M'].includes(normalized)) {
+        return 'A';
+    }
+    if (['B', 'CONTEXT', 'REGENERATE', 'R'].includes(normalized)) {
+        return 'B';
+    }
+    if (['C', 'PARTIAL', 'ACCEPT'].includes(normalized)) {
+        return 'C';
+    }
+    return null;
+}
+export function resolveFallbackAction(choice) {
+    switch (choice) {
+        case 'A':
+            return 'manual';
+        case 'B':
+            return 'regenerate';
+        case 'C':
+            return 'accept_partial';
+        default:
+            return 'manual';
+    }
+}
 export function handleAccept(state, sectionType) {
     const validation = validateContent(state.content, sectionType);
     if (!validation.valid) {
@@ -216,3 +364,28 @@ export function parseUserChoice(input) {
 export function isCancel(input) {
     return input.trim().toLowerCase() === 'cancel';
 }
+export function handleFallbackManual(sectionType, reason) {
+    return {
+        success: false,
+        error: `Fallback to manual Q&A: ${reason}`,
+        fallbackAction: 'manual',
+    };
+}
+export function handleFallbackAcceptPartial(partialContent, sectionType, reason) {
+    const validation = validateContent(partialContent, sectionType);
+    if (!validation.valid) {
+        return {
+            success: false,
+            error: `Partial content is invalid: ${validation.error}. Falling back to manual Q&A.`,
+            fallbackAction: 'manual',
+        };
+    }
+    return {
+        success: true,
+        content: partialContent,
+        fallbackAction: 'accept_partial',
+    };
+}
+export function handleFallbackRegenerate(sectionType, context, additionalContext) {
+    return generateContentForSection(sectionType, context);
+}
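Taken together, the new exports form a small fallback loop: a failed generation carries a fallbackPrompt, the user's reply is parsed and resolved to an action, and one of the handleFallback* helpers runs. A brief sketch of that loop, assuming userInput is the user's raw A/B/C reply (the wiring shown here is illustrative, not the plugin's actual call site):

import { generateContentForSection, parseFallbackChoice, resolveFallbackAction, handleFallbackManual, handleFallbackAcceptPartial, handleFallbackRegenerate } from "./autogenerateFlow.js";
// Hedged sketch; `context` is a ProjectContext and `userInput` is the user's reply string.
function runWithFallback(sectionType, context, userInput) {
    const result = generateContentForSection(sectionType, context);
    if (result.success)
        return result;
    // result.fallbackPrompt would be shown to the user before reading userInput.
    const action = resolveFallbackAction(parseFallbackChoice(userInput));
    if (action === "regenerate")
        return handleFallbackRegenerate(sectionType, context, userInput);
    if (action === "accept_partial")
        return handleFallbackAcceptPartial(result.content ?? "", sectionType, result.error ?? "");
    return handleFallbackManual(sectionType, result.error ?? "");
}

Note that handleFallbackRegenerate, as shipped in this diff, ignores its additionalContext argument and simply re-runs generateContentForSection, so choice B only helps once the underlying context object itself has been enriched.
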
package/dist/utils/autogenerateFlow.test.js
CHANGED

@@ -1,5 +1,5 @@
 import { describe, it, expect, beforeEach } from 'vitest';
-import { createInitialState, generateContentForSection, validateContent, handleAccept, handleEdit, handleRegenerate, formatPresentationPrompt, formatEditPrompt, formatRegeneratePrompt, formatMaxAttemptsPrompt, parseUserChoice, isCancel, MAX_REGENERATION_ATTEMPTS, } from './autogenerateFlow.js';
+import { createInitialState, generateContentForSection, validateContent, handleAccept, handleEdit, handleRegenerate, formatPresentationPrompt, formatEditPrompt, formatRegeneratePrompt, formatMaxAttemptsPrompt, parseUserChoice, isCancel, MAX_REGENERATION_ATTEMPTS, MIN_CONTEXT_SIGNALS, evaluateContextSufficiency, evaluateAmbiguity, formatFallbackPrompt, formatAutogenerationFailure, parseFallbackChoice, resolveFallbackAction, handleFallbackManual, handleFallbackAcceptPartial, handleFallbackRegenerate, } from './autogenerateFlow.js';
 describe('autogenerateFlow', () => {
     let mockContext;
     let initialState;
@@ -87,6 +87,47 @@ describe('autogenerateFlow', () => {
         expect(result.content).toBeTruthy();
         expect(result.error).toBeUndefined();
     });
+    it('should report failure when confidence below threshold', () => {
+        const lowConfidenceContext = {
+            ...mockContext,
+            insights: {
+                ...mockContext.insights,
+                techStack: {
+                    ...mockContext.insights.techStack,
+                    frameworks: [],
+                    testing: [],
+                },
+                workflow: {
+                    ...mockContext.insights.workflow,
+                    testingStrategy: 'unknown',
+                    commitConvention: 'none',
+                },
+            },
+            raw: {
+                ...mockContext.raw,
+                docs: [],
+                cicd: [],
+                manifests: [],
+                git: {
+                    commitCount: 0,
+                    conventionalCommits: 0,
+                    patterns: [],
+                    warnings: [],
+                },
+                structure: {
+                    structure: [],
+                    fileExtensions: [],
+                    warnings: [],
+                },
+            },
+        };
+        const result = generateContentForSection('workflow', lowConfidenceContext);
+        expect(result.success).toBe(false);
+        expect(result.error).toContain('Insufficient context');
+        expect(result.failureSummary).toContain('Autogeneration failed');
+        expect(result.fallbackPrompt).toContain('Continue with manual Q&A');
+        expect(result.fallbackState?.sufficient).toBe(false);
+    });
     it('should generate content for product_guidelines section', () => {
         const result = generateContentForSection('product_guidelines', mockContext);
         expect(result.success).toBe(true);
@@ -103,7 +144,7 @@ describe('autogenerateFlow', () => {
         expect(result.success).toBe(true);
         expect(result.content).toBeTruthy();
     });
-    it('should
+    it('should reject low confidence context during generation', () => {
         const lowConfidenceContext = {
             ...mockContext,
             insights: {
@@ -116,11 +157,35 @@ describe('autogenerateFlow', () => {
                     ...mockContext.insights.workflow,
                     confidence: 0.3,
                 },
+                techStack: {
+                    ...mockContext.insights.techStack,
+                    frameworks: [],
+                    languages: [],
+                },
+            },
+            raw: {
+                ...mockContext.raw,
+                docs: [],
+                cicd: [],
+                manifests: [],
+                git: {
+                    commitCount: 0,
+                    conventionalCommits: 0,
+                    patterns: [],
+                    warnings: [],
+                },
+                structure: {
+                    structure: [],
+                    fileExtensions: [],
+                    warnings: [],
+                },
             },
         };
         const result = generateContentForSection('product_guide', lowConfidenceContext);
-        expect(result.success).toBe(
-        expect(result.
+        expect(result.success).toBe(false);
+        expect(result.error).toContain('Insufficient context');
+        expect(result.fallbackPrompt).toContain('Provide more context');
+        expect(result.ambiguityState?.ambiguous).toBe(true);
     });
     });
     describe('validateContent', () => {
@@ -289,6 +354,120 @@ describe('autogenerateFlow', () => {
         expect(prompt).toContain('C) Switch to manual Q&A');
     });
     });
+    describe('evaluateContextSufficiency', () => {
+        it('should report sufficient context with multiple signals', () => {
+            const result = evaluateContextSufficiency(mockContext);
+            expect(result.sufficient).toBe(true);
+            expect(result.reason).toContain('context sources');
+            expect(result.missing.length).toBeLessThan(6);
+        });
+        it('should report insufficient context with minimal signals', () => {
+            const minimalContext = {
+                ...mockContext,
+                raw: {
+                    ...mockContext.raw,
+                    manifests: [],
+                    docs: [],
+                    git: { ...mockContext.raw.git, commitCount: 0 },
+                    structure: { ...mockContext.raw.structure, structure: [] },
+                    ignores: { patterns: [] },
+                    cicd: [],
+                },
+                meta: {
+                    ...mockContext.meta,
+                    warnings: [
+                        'No manifest files found',
+                        'No README found',
+                        'Not a git repository',
+                    ],
+                },
+            };
+            const result = evaluateContextSufficiency(minimalContext);
+            expect(result.sufficient).toBe(false);
+            expect(result.reason).toContain('Not enough project context');
+            expect(result.missing.length).toBeGreaterThanOrEqual(MIN_CONTEXT_SIGNALS);
+        });
+    });
+    describe('evaluateAmbiguity', () => {
+        it('should detect ambiguous contexts', () => {
+            const ambiguousContext = {
+                ...mockContext,
+                insights: {
+                    ...mockContext.insights,
+                    projectType: 'unknown',
+                    techStack: { ...mockContext.insights.techStack, languages: [] },
+                },
+                meta: {
+                    ...mockContext.meta,
+                    filesAnalyzed: 0,
+                    warnings: ['No manifest files found', 'No README found'],
+                },
+            };
+            const result = evaluateAmbiguity(ambiguousContext);
+            expect(result.ambiguous).toBe(true);
+            expect(result.signals.length).toBeGreaterThan(0);
+        });
+    });
+    describe('formatFallbackPrompt', () => {
+        it('should include reason and missing data', () => {
+            const prompt = formatFallbackPrompt('Insufficient context', ['manifests']);
+            expect(prompt).toContain('Insufficient context');
+            expect(prompt).toContain('Missing: manifests');
+            expect(prompt).toContain('A) Continue with manual Q&A');
+        });
+    });
+    describe('formatAutogenerationFailure', () => {
+        it('should format a detailed failure summary', () => {
+            const sufficiency = {
+                sufficient: false,
+                reason: 'Not enough project context sources',
+                missing: ['docs'],
+            };
+            const ambiguity = {
+                ambiguous: true,
+                reason: 'Context ambiguous',
+                signals: ['Missing README'],
+            };
+            const message = formatAutogenerationFailure('product_guide', 'Insufficient context', sufficiency, ambiguity);
+            expect(message).toContain('Autogeneration failed');
+            expect(message).toContain('Insufficient context');
+            expect(message).toContain('Missing: docs');
+            expect(message).toContain('Ambiguity signals');
+        });
+    });
+    describe('parseFallbackChoice', () => {
+        it('should parse manual choice', () => {
+            expect(parseFallbackChoice('A')).toBe('A');
+            expect(parseFallbackChoice('manual')).toBe('A');
+            expect(parseFallbackChoice('m')).toBe('A');
+        });
+        it('should parse regenerate choice', () => {
+            expect(parseFallbackChoice('B')).toBe('B');
+            expect(parseFallbackChoice('regenerate')).toBe('B');
+            expect(parseFallbackChoice('context')).toBe('B');
+        });
+        it('should parse accept partial choice', () => {
+            expect(parseFallbackChoice('C')).toBe('C');
+            expect(parseFallbackChoice('partial')).toBe('C');
+            expect(parseFallbackChoice('accept')).toBe('C');
+        });
+        it('should reject invalid fallback choices', () => {
+            expect(parseFallbackChoice('D')).toBeNull();
+            expect(parseFallbackChoice('')).toBeNull();
+        });
+    });
+    describe('resolveFallbackAction', () => {
+        it('should resolve manual action', () => {
+            expect(resolveFallbackAction('A')).toBe('manual');
+            expect(resolveFallbackAction(null)).toBe('manual');
+        });
+        it('should resolve regenerate action', () => {
+            expect(resolveFallbackAction('B')).toBe('regenerate');
+        });
+        it('should resolve accept partial action', () => {
+            expect(resolveFallbackAction('C')).toBe('accept_partial');
+        });
+    });
     describe('parseUserChoice', () => {
         it('should parse Accept choices', () => {
             expect(parseUserChoice('A')).toBe('A');
@@ -334,4 +513,98 @@ describe('autogenerateFlow', () => {
         expect(isCancel('')).toBe(false);
     });
     });
+    describe('Fallback Handlers', () => {
+        describe('handleFallbackManual', () => {
+            it('should return failure with manual fallback action', () => {
+                const result = handleFallbackManual('product_guide', 'Insufficient context');
+                expect(result.success).toBe(false);
+                expect(result.error).toContain('Fallback to manual Q&A');
+                expect(result.error).toContain('Insufficient context');
+                expect(result.fallbackAction).toBe('manual');
+            });
+        });
+        describe('handleFallbackAcceptPartial', () => {
+            it('should accept valid partial content', () => {
+                const partialContent = 'This is a partial product description with limited details';
+                const result = handleFallbackAcceptPartial(partialContent, 'product_guide', 'Low confidence generation');
+                expect(result.success).toBe(true);
+                expect(result.content).toBe(partialContent);
+                expect(result.fallbackAction).toBe('accept_partial');
+            });
+            it('should reject invalid partial content and fall back to manual', () => {
+                const invalidContent = 'xyz';
+                const result = handleFallbackAcceptPartial(invalidContent, 'product_guide', 'Low confidence');
+                expect(result.success).toBe(false);
+                expect(result.error).toContain('Partial content is invalid');
+                expect(result.fallbackAction).toBe('manual');
+            });
+            it('should reject empty partial content', () => {
+                const result = handleFallbackAcceptPartial('', 'product_guide', 'Empty generation');
+                expect(result.success).toBe(false);
+                expect(result.error).toContain('Partial content is invalid');
+                expect(result.fallbackAction).toBe('manual');
+            });
+        });
+        describe('handleFallbackRegenerate', () => {
+            it('should regenerate content with updated context', () => {
+                const result = handleFallbackRegenerate('product_guide', mockContext, 'Focus on developer tooling aspect');
+                expect(result.success).toBe(true);
+                expect(result.content).toBeTruthy();
+            });
+            it('should fail regeneration with insufficient context', () => {
+                const minimalContext = {
+                    raw: {
+                        manifests: [],
+                        docs: [],
+                        cicd: [],
+                        ignores: { patterns: [] },
+                        git: {
+                            commitCount: 0,
+                            conventionalCommits: 0,
+                            patterns: [],
+                            warnings: [],
+                        },
+                        structure: {
+                            structure: [],
+                            fileExtensions: [],
+                            warnings: [],
+                        },
+                    },
+                    insights: {
+                        techStack: {
+                            languages: [],
+                            frameworks: [],
+                            databases: [],
+                            infrastructure: [],
+                            testing: [],
+                        },
+                        product: {
+                            targetUsers: [],
+                            coreProblems: [],
+                            keyFeatures: [],
+                            confidence: 0.0,
+                        },
+                        workflow: {
+                            commitConvention: 'none',
+                            testingStrategy: 'unknown',
+                            branchStrategy: 'unknown',
+                            confidence: 0.0,
+                        },
+                        projectType: 'unknown',
+                        maturity: 'early',
+                    },
+                    meta: {
+                        analyzedAt: new Date().toISOString(),
+                        analysisTimeMs: 0,
+                        filesAnalyzed: 0,
+                        categoriesCompleted: [],
+                        warnings: [],
+                    },
+                };
+                const result = handleFallbackRegenerate('workflow', minimalContext, 'Please add more workflow details');
+                expect(result.success).toBe(false);
+                expect(result.error).toContain('Insufficient context');
+            });
+        });
+    });
 });
package/dist/utils/configDetection.js
CHANGED

@@ -7,29 +7,43 @@ export function detectOrchestratorConfig() {
     const omoJsonPath = join(opencodeConfigDir, "oh-my-opencode.json");
     let hasOrchestratorInOpenCode = false;
     let hasOrchestratorInOMO = false;
-
+    let orchestratorModel;
+    // Check oh-my-opencode.json first (higher priority)
+    if (existsSync(omoJsonPath)) {
         try {
-            const config = JSON.parse(readFileSync(
-            if (config.
-
+            const config = JSON.parse(readFileSync(omoJsonPath, "utf-8"));
+            if (config.agents && config.agents.orchestrator) {
+                hasOrchestratorInOMO = true;
+                // Extract model from oh-my-opencode.json
+                if (config.agents.orchestrator.model) {
+                    orchestratorModel = config.agents.orchestrator.model;
+                }
             }
         }
         catch (e) {
+            // Silently fail on parse errors
         }
     }
-    if
+    // Check opencode.json (fallback if model not found in OMO)
+    if (existsSync(opencodeJsonPath)) {
         try {
-            const config = JSON.parse(readFileSync(
-            if (config.
-
+            const config = JSON.parse(readFileSync(opencodeJsonPath, "utf-8"));
+            if (config.agent && config.agent.orchestrator) {
+                hasOrchestratorInOpenCode = true;
+                // Only use this model if we didn't find one in oh-my-opencode.json
+                if (!orchestratorModel && config.agent.orchestrator.model) {
+                    orchestratorModel = config.agent.orchestrator.model;
+                }
             }
         }
         catch (e) {
+            // Silently fail on parse errors
         }
     }
     return {
         hasOrchestratorInOpenCode,
         hasOrchestratorInOMO,
         synergyActive: hasOrchestratorInOMO,
+        orchestratorModel,
     };
 }
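The detection order is: a model on agents.orchestrator in oh-my-opencode.json wins; otherwise the model on agent.orchestrator in opencode.json is used; otherwise orchestratorModel stays undefined. A small sketch of the equivalent priority rule, using made-up config objects:

// Hedged sketch of the priority rule implemented above; both objects are example data.
const omoConfig = { agents: { orchestrator: {} } }; // no model set here
const opencodeConfig = { agent: { orchestrator: { model: "model-from-opencode" } } };
const orchestratorModel =
    omoConfig.agents?.orchestrator?.model ??
    opencodeConfig.agent?.orchestrator?.model; // falls back to opencode.json
console.log(orchestratorModel); // "model-from-opencode"

The tests added below exercise exactly these cases: the oh-my-opencode.json model winning, the opencode.json model as fallback, and undefined when neither file sets one.
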
package/dist/utils/configDetection.test.js
CHANGED

@@ -18,7 +18,7 @@ describe("configDetection", () => {
         vi.mocked(existsSync).mockImplementation((path) => path === opencodeJsonPath);
         vi.mocked(readFileSync).mockImplementation((path) => {
             if (path === opencodeJsonPath) {
-                return JSON.stringify({ agent: { orchestrator: {} } });
+                return JSON.stringify({ agent: { orchestrator: { model: "anthropic/claude-3-5-sonnet" } } });
             }
             return "";
         });
@@ -26,12 +26,13 @@ describe("configDetection", () => {
         expect(result.hasOrchestratorInOpenCode).toBe(true);
         expect(result.hasOrchestratorInOMO).toBe(false);
         expect(result.synergyActive).toBe(false);
+        expect(result.orchestratorModel).toBe("anthropic/claude-3-5-sonnet");
     });
     it("should detect orchestrator in oh-my-opencode.json and activate synergy", () => {
         vi.mocked(existsSync).mockImplementation((path) => path === omoJsonPath);
         vi.mocked(readFileSync).mockImplementation((path) => {
             if (path === omoJsonPath) {
-                return JSON.stringify({ agents: { orchestrator: {} } });
+                return JSON.stringify({ agents: { orchestrator: { model: "anthropic/claude-3-5-haiku" } } });
             }
             return "";
         });
@@ -39,15 +40,16 @@ describe("configDetection", () => {
         expect(result.hasOrchestratorInOpenCode).toBe(false);
         expect(result.hasOrchestratorInOMO).toBe(true);
         expect(result.synergyActive).toBe(true);
+        expect(result.orchestratorModel).toBe("anthropic/claude-3-5-haiku");
     });
     it("should handle both configs present", () => {
         vi.mocked(existsSync).mockReturnValue(true);
         vi.mocked(readFileSync).mockImplementation((path) => {
             if (path === opencodeJsonPath) {
-                return JSON.stringify({ agent: { orchestrator: {} } });
+                return JSON.stringify({ agent: { orchestrator: { model: "anthropic/claude-3-5-sonnet" } } });
             }
             if (path === omoJsonPath) {
-                return JSON.stringify({ agents: { orchestrator: {} } });
+                return JSON.stringify({ agents: { orchestrator: { model: "anthropic/claude-3-5-haiku" } } });
             }
             return "";
         });
@@ -55,6 +57,7 @@ describe("configDetection", () => {
         expect(result.hasOrchestratorInOpenCode).toBe(true);
         expect(result.hasOrchestratorInOMO).toBe(true);
         expect(result.synergyActive).toBe(true);
+        expect(result.orchestratorModel).toBe("anthropic/claude-3-5-haiku");
     });
     it("should handle missing configs", () => {
         vi.mocked(existsSync).mockReturnValue(false);
@@ -62,6 +65,7 @@ describe("configDetection", () => {
         expect(result.hasOrchestratorInOpenCode).toBe(false);
         expect(result.hasOrchestratorInOMO).toBe(false);
         expect(result.synergyActive).toBe(false);
+        expect(result.orchestratorModel).toBeUndefined();
     });
     it("should handle malformed JSON", () => {
         vi.mocked(existsSync).mockReturnValue(true);
@@ -70,5 +74,46 @@ describe("configDetection", () => {
         expect(result.hasOrchestratorInOpenCode).toBe(false);
         expect(result.hasOrchestratorInOMO).toBe(false);
         expect(result.synergyActive).toBe(false);
+        expect(result.orchestratorModel).toBeUndefined();
+    });
+    it("should prioritize oh-my-opencode.json model over opencode.json", () => {
+        vi.mocked(existsSync).mockReturnValue(true);
+        vi.mocked(readFileSync).mockImplementation((path) => {
+            if (path === opencodeJsonPath) {
+                return JSON.stringify({ agent: { orchestrator: { model: "model-from-opencode" } } });
+            }
+            if (path === omoJsonPath) {
+                return JSON.stringify({ agents: { orchestrator: { model: "model-from-omo" } } });
+            }
+            return "";
+        });
+        const result = detectOrchestratorConfig();
+        expect(result.orchestratorModel).toBe("model-from-omo");
+    });
+    it("should fallback to opencode.json model when oh-my-opencode.json has no model", () => {
+        vi.mocked(existsSync).mockReturnValue(true);
+        vi.mocked(readFileSync).mockImplementation((path) => {
+            if (path === opencodeJsonPath) {
+                return JSON.stringify({ agent: { orchestrator: { model: "model-from-opencode" } } });
+            }
+            if (path === omoJsonPath) {
+                return JSON.stringify({ agents: { orchestrator: {} } });
+            }
+            return "";
+        });
+        const result = detectOrchestratorConfig();
+        expect(result.orchestratorModel).toBe("model-from-opencode");
+    });
+    it("should handle orchestrator config without model field", () => {
+        vi.mocked(existsSync).mockImplementation((path) => path === omoJsonPath);
+        vi.mocked(readFileSync).mockImplementation((path) => {
+            if (path === omoJsonPath) {
+                return JSON.stringify({ agents: { orchestrator: {} } });
+            }
+            return "";
+        });
+        const result = detectOrchestratorConfig();
+        expect(result.hasOrchestratorInOMO).toBe(true);
+        expect(result.orchestratorModel).toBeUndefined();
     });
 });
package/dist/utils/contextAnalysis.test.js
CHANGED

@@ -124,12 +124,12 @@ import { foo } from 'test-project'
         expect(result.docs[0].content).toContain("Feature A");
     });
     it("should sample large files (> 10KB)", async () => {
-        const
+        const largeLine = "x".repeat(100);
+        const largeContent = Array(250).fill(largeLine).join("\n") + "\n\nIMPORTANT INFO";
         vi.mocked(readFile).mockResolvedValue(largeContent);
         vi.mocked(readdir).mockResolvedValue(["README.md"]);
         const result = await analyzeDocs("/test/project");
         expect(result.docs).toHaveLength(1);
-        // Should sample, not include entire content
         expect(result.docs[0].content.length).toBeLessThan(largeContent.length);
         expect(result.warnings).toContain("Large file detected, using sampling");
     });
@@ -142,25 +142,23 @@ import { foo } from 'test-project'
     });
     describe("analyzeGitHistory", () => {
         it("should analyze git commit patterns", async () => {
-            vi.mocked(execSync).
-
-
-
-
-
-`));
+            vi.mocked(execSync).mockImplementation(() => `abc1234 feat: add new feature
+def5678 fix: bug fix
+abc9101 chore: update deps
+def1121 test: add tests
+abc3141 feat(auth): implement login
+`);
             const result = await analyzeGitHistory("/test/project");
             expect(result.commitCount).toBe(5);
             expect(result.conventionalCommits).toBeGreaterThan(0);
             expect(result.patterns).toContain("conventional");
         });
         it("should detect non-conventional commit patterns", async () => {
-            vi.mocked(execSync).
-
-
-
-
-`));
+            vi.mocked(execSync).mockImplementation(() => `abc1234 Add new feature
+def5678 Fix bug
+abc9101 Update README
+def1121 Random commit message
+`);
             const result = await analyzeGitHistory("/test/project");
             expect(result.commitCount).toBe(4);
             expect(result.conventionalCommits).toBe(0);
@@ -179,10 +177,10 @@ Random commit message
         it("should analyze directory structure", async () => {
             vi.mocked(readdir).mockImplementation(async (path) => {
                 if (path === "/test/project") {
-                    return ["src", "tests", "package.json", "README.md"];
+                    return ["src", "tests", "package.json", "README.md", "index.ts", "config.ts"];
                 }
                 if (path === "/test/project/src") {
-                    return ["
+                    return ["utils.ts", "components"];
                 }
                 return [];
             });