@agile-vibe-coding/avc 0.1.1 → 0.3.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/cli/agent-loader.js +21 -0
- package/cli/agents/agent-selector.md +152 -0
- package/cli/agents/architecture-recommender.md +418 -0
- package/cli/agents/code-implementer.md +117 -0
- package/cli/agents/code-validator.md +80 -0
- package/cli/agents/context-reviewer-epic.md +101 -0
- package/cli/agents/context-reviewer-story.md +92 -0
- package/cli/agents/context-writer-epic.md +145 -0
- package/cli/agents/context-writer-story.md +111 -0
- package/cli/agents/database-deep-dive.md +470 -0
- package/cli/agents/database-recommender.md +634 -0
- package/cli/agents/doc-distributor.md +176 -0
- package/cli/agents/doc-writer-epic.md +42 -0
- package/cli/agents/doc-writer-story.md +43 -0
- package/cli/agents/documentation-updater.md +203 -0
- package/cli/agents/duplicate-detector.md +110 -0
- package/cli/agents/epic-story-decomposer.md +559 -0
- package/cli/agents/feature-context-generator.md +91 -0
- package/cli/agents/gap-checker-epic.md +52 -0
- package/cli/agents/impact-checker-story.md +51 -0
- package/cli/agents/migration-guide-generator.md +305 -0
- package/cli/agents/mission-scope-generator.md +143 -0
- package/cli/agents/mission-scope-validator.md +146 -0
- package/cli/agents/project-context-extractor.md +122 -0
- package/cli/agents/project-documentation-creator.json +226 -0
- package/cli/agents/project-documentation-creator.md +595 -0
- package/cli/agents/question-prefiller.md +269 -0
- package/cli/agents/refiner-epic.md +39 -0
- package/cli/agents/refiner-story.md +42 -0
- package/cli/agents/scaffolding-generator.md +99 -0
- package/cli/agents/seed-validator.md +71 -0
- package/cli/agents/story-doc-enricher.md +133 -0
- package/cli/agents/story-scope-reviewer.md +147 -0
- package/cli/agents/story-splitter.md +83 -0
- package/cli/agents/suggestion-business-analyst.md +88 -0
- package/cli/agents/suggestion-deployment-architect.md +263 -0
- package/cli/agents/suggestion-product-manager.md +129 -0
- package/cli/agents/suggestion-security-specialist.md +156 -0
- package/cli/agents/suggestion-technical-architect.md +269 -0
- package/cli/agents/suggestion-ux-researcher.md +93 -0
- package/cli/agents/task-subtask-decomposer.md +188 -0
- package/cli/agents/validator-documentation.json +183 -0
- package/cli/agents/validator-documentation.md +455 -0
- package/cli/agents/validator-selector.md +211 -0
- package/cli/ansi-colors.js +21 -0
- package/cli/api-reference-tool.js +368 -0
- package/cli/build-docs.js +29 -8
- package/cli/ceremony-history.js +369 -0
- package/cli/checks/catalog.json +76 -0
- package/cli/checks/code/quality.json +26 -0
- package/cli/checks/code/testing.json +14 -0
- package/cli/checks/code/traceability.json +26 -0
- package/cli/checks/cross-refs/epic.json +171 -0
- package/cli/checks/cross-refs/story.json +149 -0
- package/cli/checks/epic/api.json +114 -0
- package/cli/checks/epic/backend.json +126 -0
- package/cli/checks/epic/cloud.json +126 -0
- package/cli/checks/epic/data.json +102 -0
- package/cli/checks/epic/database.json +114 -0
- package/cli/checks/epic/developer.json +182 -0
- package/cli/checks/epic/devops.json +174 -0
- package/cli/checks/epic/frontend.json +162 -0
- package/cli/checks/epic/mobile.json +102 -0
- package/cli/checks/epic/qa.json +90 -0
- package/cli/checks/epic/security.json +184 -0
- package/cli/checks/epic/solution-architect.json +192 -0
- package/cli/checks/epic/test-architect.json +90 -0
- package/cli/checks/epic/ui.json +102 -0
- package/cli/checks/epic/ux.json +90 -0
- package/cli/checks/fixes/epic-fix-template.md +10 -0
- package/cli/checks/fixes/story-fix-template.md +10 -0
- package/cli/checks/story/api.json +186 -0
- package/cli/checks/story/backend.json +102 -0
- package/cli/checks/story/cloud.json +102 -0
- package/cli/checks/story/data.json +210 -0
- package/cli/checks/story/database.json +102 -0
- package/cli/checks/story/developer.json +168 -0
- package/cli/checks/story/devops.json +102 -0
- package/cli/checks/story/frontend.json +174 -0
- package/cli/checks/story/mobile.json +102 -0
- package/cli/checks/story/qa.json +210 -0
- package/cli/checks/story/security.json +198 -0
- package/cli/checks/story/solution-architect.json +230 -0
- package/cli/checks/story/test-architect.json +210 -0
- package/cli/checks/story/ui.json +102 -0
- package/cli/checks/story/ux.json +102 -0
- package/cli/coding-order.js +401 -0
- package/cli/command-logger.js +49 -12
- package/cli/components/static-output.js +63 -0
- package/cli/console-output-manager.js +94 -0
- package/cli/dependency-checker.js +72 -0
- package/cli/docs-sync.js +306 -0
- package/cli/epic-story-validator.js +659 -0
- package/cli/evaluation-prompts.js +1008 -0
- package/cli/execution-context.js +195 -0
- package/cli/generate-summary-table.js +340 -0
- package/cli/init-model-config.js +704 -0
- package/cli/init.js +1737 -278
- package/cli/kanban-server-manager.js +227 -0
- package/cli/llm-claude.js +150 -1
- package/cli/llm-gemini.js +109 -0
- package/cli/llm-local.js +493 -0
- package/cli/llm-mock.js +233 -0
- package/cli/llm-openai.js +454 -0
- package/cli/llm-provider.js +379 -3
- package/cli/llm-token-limits.js +211 -0
- package/cli/llm-verifier.js +662 -0
- package/cli/llm-xiaomi.js +143 -0
- package/cli/message-constants.js +49 -0
- package/cli/message-manager.js +334 -0
- package/cli/message-types.js +96 -0
- package/cli/messaging-api.js +291 -0
- package/cli/micro-check-fixer.js +335 -0
- package/cli/micro-check-runner.js +449 -0
- package/cli/micro-check-scorer.js +148 -0
- package/cli/micro-check-validator.js +538 -0
- package/cli/model-pricing.js +192 -0
- package/cli/model-query-engine.js +468 -0
- package/cli/model-recommendation-analyzer.js +495 -0
- package/cli/model-selector.js +270 -0
- package/cli/output-buffer.js +107 -0
- package/cli/process-manager.js +73 -2
- package/cli/prompt-logger.js +57 -0
- package/cli/repl-ink.js +4625 -1094
- package/cli/repl-old.js +3 -4
- package/cli/seed-processor.js +962 -0
- package/cli/sprint-planning-processor.js +4162 -0
- package/cli/template-processor.js +2149 -105
- package/cli/templates/project.md +25 -8
- package/cli/templates/vitepress-config.mts.template +5 -4
- package/cli/token-tracker.js +547 -0
- package/cli/tools/generate-story-validators.js +317 -0
- package/cli/tools/generate-validators.js +669 -0
- package/cli/update-checker.js +19 -17
- package/cli/update-notifier.js +4 -4
- package/cli/validation-router.js +667 -0
- package/cli/verification-tracker.js +563 -0
- package/cli/worktree-runner.js +654 -0
- package/kanban/README.md +386 -0
- package/kanban/client/README.md +205 -0
- package/kanban/client/components.json +20 -0
- package/kanban/client/dist/assets/index-D_KC5EQT.css +1 -0
- package/kanban/client/dist/assets/index-DjY5zqW7.js +351 -0
- package/kanban/client/dist/index.html +16 -0
- package/kanban/client/dist/vite.svg +1 -0
- package/kanban/client/index.html +15 -0
- package/kanban/client/package-lock.json +9442 -0
- package/kanban/client/package.json +44 -0
- package/kanban/client/postcss.config.js +6 -0
- package/kanban/client/public/vite.svg +1 -0
- package/kanban/client/src/App.jsx +651 -0
- package/kanban/client/src/components/ProjectFileEditorPopup.jsx +117 -0
- package/kanban/client/src/components/ceremony/AskArchPopup.jsx +420 -0
- package/kanban/client/src/components/ceremony/AskModelPopup.jsx +629 -0
- package/kanban/client/src/components/ceremony/CeremonyWorkflowModal.jsx +1133 -0
- package/kanban/client/src/components/ceremony/EpicStorySelectionModal.jsx +254 -0
- package/kanban/client/src/components/ceremony/ProviderSwitcherButton.jsx +290 -0
- package/kanban/client/src/components/ceremony/SponsorCallModal.jsx +686 -0
- package/kanban/client/src/components/ceremony/SprintPlanningModal.jsx +838 -0
- package/kanban/client/src/components/ceremony/steps/ArchitectureStep.jsx +150 -0
- package/kanban/client/src/components/ceremony/steps/CompleteStep.jsx +136 -0
- package/kanban/client/src/components/ceremony/steps/DatabaseStep.jsx +202 -0
- package/kanban/client/src/components/ceremony/steps/DeploymentStep.jsx +123 -0
- package/kanban/client/src/components/ceremony/steps/MissionStep.jsx +106 -0
- package/kanban/client/src/components/ceremony/steps/ReviewAnswersStep.jsx +329 -0
- package/kanban/client/src/components/ceremony/steps/RunningStep.jsx +249 -0
- package/kanban/client/src/components/kanban/CardDetailModal.jsx +646 -0
- package/kanban/client/src/components/kanban/EpicSection.jsx +146 -0
- package/kanban/client/src/components/kanban/FilterToolbar.jsx +222 -0
- package/kanban/client/src/components/kanban/GroupingSelector.jsx +63 -0
- package/kanban/client/src/components/kanban/KanbanBoard.jsx +211 -0
- package/kanban/client/src/components/kanban/KanbanCard.jsx +147 -0
- package/kanban/client/src/components/kanban/KanbanColumn.jsx +90 -0
- package/kanban/client/src/components/kanban/RefineWorkItemPopup.jsx +784 -0
- package/kanban/client/src/components/kanban/RunButton.jsx +162 -0
- package/kanban/client/src/components/kanban/SeedButton.jsx +176 -0
- package/kanban/client/src/components/layout/LoadingScreen.jsx +82 -0
- package/kanban/client/src/components/process/ProcessMonitorBar.jsx +80 -0
- package/kanban/client/src/components/settings/AgentEditorPopup.jsx +171 -0
- package/kanban/client/src/components/settings/AgentsTab.jsx +381 -0
- package/kanban/client/src/components/settings/ApiKeysTab.jsx +142 -0
- package/kanban/client/src/components/settings/CeremonyModelsTab.jsx +105 -0
- package/kanban/client/src/components/settings/CheckEditorPopup.jsx +507 -0
- package/kanban/client/src/components/settings/CostThresholdsTab.jsx +95 -0
- package/kanban/client/src/components/settings/ModelPricingTab.jsx +269 -0
- package/kanban/client/src/components/settings/OpenAIAuthSection.jsx +412 -0
- package/kanban/client/src/components/settings/ServersTab.jsx +121 -0
- package/kanban/client/src/components/settings/SettingsModal.jsx +84 -0
- package/kanban/client/src/components/stats/CostModal.jsx +384 -0
- package/kanban/client/src/components/ui/badge.jsx +27 -0
- package/kanban/client/src/components/ui/dialog.jsx +121 -0
- package/kanban/client/src/components/ui/tabs.jsx +85 -0
- package/kanban/client/src/hooks/__tests__/useGrouping.test.js +232 -0
- package/kanban/client/src/hooks/useGrouping.js +177 -0
- package/kanban/client/src/hooks/useWebSocket.js +120 -0
- package/kanban/client/src/lib/__tests__/api.test.js +196 -0
- package/kanban/client/src/lib/__tests__/status-grouping.test.js +94 -0
- package/kanban/client/src/lib/api.js +515 -0
- package/kanban/client/src/lib/status-grouping.js +154 -0
- package/kanban/client/src/lib/utils.js +11 -0
- package/kanban/client/src/main.jsx +10 -0
- package/kanban/client/src/store/__tests__/kanbanStore.test.js +164 -0
- package/kanban/client/src/store/ceremonyStore.js +172 -0
- package/kanban/client/src/store/filterStore.js +201 -0
- package/kanban/client/src/store/kanbanStore.js +123 -0
- package/kanban/client/src/store/processStore.js +65 -0
- package/kanban/client/src/store/sprintPlanningStore.js +33 -0
- package/kanban/client/src/styles/globals.css +59 -0
- package/kanban/client/tailwind.config.js +77 -0
- package/kanban/client/vite.config.js +28 -0
- package/kanban/client/vitest.config.js +28 -0
- package/kanban/dev-start.sh +47 -0
- package/kanban/package.json +12 -0
- package/kanban/server/index.js +537 -0
- package/kanban/server/routes/ceremony.js +454 -0
- package/kanban/server/routes/costs.js +163 -0
- package/kanban/server/routes/openai-oauth.js +366 -0
- package/kanban/server/routes/processes.js +50 -0
- package/kanban/server/routes/settings.js +736 -0
- package/kanban/server/routes/websocket.js +281 -0
- package/kanban/server/routes/work-items.js +487 -0
- package/kanban/server/services/CeremonyService.js +1441 -0
- package/kanban/server/services/FileSystemScanner.js +95 -0
- package/kanban/server/services/FileWatcher.js +144 -0
- package/kanban/server/services/HierarchyBuilder.js +196 -0
- package/kanban/server/services/ProcessRegistry.js +122 -0
- package/kanban/server/services/TaskRunnerService.js +261 -0
- package/kanban/server/services/WorkItemReader.js +123 -0
- package/kanban/server/services/WorkItemRefineService.js +510 -0
- package/kanban/server/start.js +49 -0
- package/kanban/server/utils/kanban-logger.js +132 -0
- package/kanban/server/utils/markdown.js +91 -0
- package/kanban/server/utils/status-grouping.js +107 -0
- package/kanban/server/workers/run-task-worker.js +121 -0
- package/kanban/server/workers/seed-worker.js +94 -0
- package/kanban/server/workers/sponsor-call-worker.js +92 -0
- package/kanban/server/workers/sprint-planning-worker.js +212 -0
- package/package.json +19 -7
- package/cli/agents/documentation.md +0 -302
package/cli/llm-mock.js
ADDED
|
@@ -0,0 +1,233 @@
|
|
|
1
|
+
/**
 * MockLLMProvider — instant canned responses for E2E testing.
 * Activated when AVC_LLM_MOCK=1 is set in the environment.
 *
 * Call type is detected primarily from the agent-instructions text and
 * secondarily from the prompt text, then a fixed deterministic payload
 * is returned so end-to-end test runs are fast and repeatable.
 */
export class MockLLMProvider {
  constructor() {
    this.providerName = 'mock';
    this.model = 'mock-model';
    this.tokenUsage = { inputTokens: 0, outputTokens: 0, totalCalls: 0 };
  }

  /** Always succeeds — there is no real API key to check. */
  async validateApiKey() {
    return { valid: true };
  }

  /** Snapshot of the approximate token accounting accumulated so far. */
  getTokenUsage() {
    const { inputTokens, outputTokens, totalCalls } = this.tokenUsage;
    return {
      inputTokens,
      outputTokens,
      totalTokens: inputTokens + outputTokens,
      totalCalls,
      estimatedCost: 0,
      provider: 'mock',
      model: 'mock-model'
    };
  }

  /** Rough bookkeeping: ~4 chars per input token, flat 50 output tokens per call. */
  _track(prompt = '') {
    this.tokenUsage.inputTokens += Math.ceil(prompt.length / 4);
    this.tokenUsage.outputTokens += 50;
    this.tokenUsage.totalCalls++;
  }

  /** Canned result for document/context validation agents. */
  _validationPayload() {
    return {
      validationStatus: 'acceptable',
      overallScore: 90,
      issues: [],
      contentIssues: [],
      structuralIssues: [],
      applicationFlowGaps: [],
      strengths: ['Well-structured document (mock validation)'],
      improvementPriorities: [],
      readyForPublication: true,
      readyForUse: true
    };
  }

  /** Canned result for the database-recommender agent. */
  _databasePayload() {
    return {
      hasDatabaseNeeds: true,
      comparison: {
        sqlOption: {
          database: 'PostgreSQL',
          specificVersion: 'PostgreSQL 16',
          bestFor: 'Relational data with ACID guarantees',
          strengths: ['Strong consistency', 'Rich query language', 'Mature ecosystem'],
          weaknesses: ['Schema migrations required'],
          estimatedCosts: { monthly: '$0 (local Docker)' }
        },
        nosqlOption: {
          database: 'MongoDB',
          specificVersion: 'MongoDB 7',
          bestFor: 'Flexible document storage',
          strengths: ['Schema flexibility', 'Easy horizontal scaling'],
          weaknesses: ['Eventual consistency by default'],
          estimatedCosts: { monthly: '$0 (local Docker)' }
        },
        keyMetrics: {
          estimatedReadWriteRatio: '70/30',
          expectedThroughput: 'Low-medium (< 1000 req/s)',
          dataComplexity: 'Medium — relational entities with joins'
        }
      },
      recommendation: 'sql',
      confidence: 85,
      reasoning: 'Mock: task management apps benefit from relational integrity'
    };
  }

  /** Canned result for the architecture-recommender agent. */
  _architecturePayload() {
    return {
      architectures: [
        {
          name: 'Local Hybrid Stack',
          description: 'Express.js/FastAPI backend on localhost with PostgreSQL in Docker',
          requiresCloudProvider: false,
          bestFor: 'Experienced developers who want fast debugging with database isolation',
          migrationPath: {
            targetCloud: 'AWS ECS / Azure Container Apps / GCP Cloud Run',
            steps: [
              'Containerize backend with Docker',
              'Push images to ECR/ACR/GCR',
              'Deploy to container orchestration service'
            ]
          }
        },
        {
          name: 'Full Docker Compose',
          description: 'All services in Docker Compose — database, backend, and frontend',
          requiresCloudProvider: false,
          bestFor: 'Teams who want identical environments across all machines',
          migrationPath: {
            targetCloud: 'AWS ECS / GCP Cloud Run',
            steps: ['Convert docker-compose.yml to ECS task definitions', 'Set up managed database']
          }
        }
      ]
    };
  }

  /** Canned result for the question-prefiller agent. */
  _prefillPayload() {
    return {
      TARGET_USERS: 'Developers and project teams managing software development tasks',
      DEPLOYMENT_TARGET: 'Local development environment using Docker Compose; ready to migrate to AWS ECS or Azure Container Apps for production',
      TECHNICAL_CONSIDERATIONS: 'Node.js/Express.js or FastAPI backend, React 18 + Vite frontend, PostgreSQL 16 in Docker for local development with production migration path',
      SECURITY_AND_COMPLIANCE_REQUIREMENTS: 'JWT authentication with refresh tokens, bcrypt password hashing, HTTPS in production, standard OWASP security practices'
    };
  }

  /** Canned result for the project-context-generator agent. */
  _contextPayload() {
    const mockContext = `# Project Context

**Mission:** Build a test task manager app
**Architecture:** Local Hybrid Stack
**Database:** PostgreSQL 16
**Tech Stack:** Node.js, Express.js, React 18, Vite
**Deployment:** Local Docker Compose → AWS ECS
`;
    return {
      contextMarkdown: mockContext,
      tokenCount: Math.ceil(mockContext.length / 4),
      withinBudget: true
    };
  }

  /** generateJSON — detect call type from agent instructions (most reliable discriminator) */
  async generateJSON(prompt, agentInstructions = null) {
    this._track(prompt);

    const promptText = (prompt || '').toLowerCase();
    // Agent-instructions filename/content is the primary discriminator —
    // prompt text can contain overlapping keywords.
    const agentText = (agentInstructions || '').toLowerCase();

    // Validation calls: validator-documentation.md / validator-context.md
    // expose "validationStatus" and "overallScore" as output fields.
    if (agentText.includes('validationstatus') || agentText.includes('overallscore') ||
        promptText.includes('validate the following')) {
      return this._validationPayload();
    }

    // database-recommender.md uniquely contains "hasDatabaseNeeds" as an output field.
    if (agentText.includes('hasdatabaseneeds') || promptText.includes('determine if it needs a database')) {
      return this._databasePayload();
    }

    // architecture-recommender.md uniquely contains "requiresCloudProvider" as an output field.
    if (agentText.includes('requirescloudprovider') || promptText.includes('recommend 3-5') || promptText.includes('deployment architectures')) {
      return this._architecturePayload();
    }

    // question-prefiller.md uniquely contains "TARGET_USERS" as an output field.
    if (agentText.includes('target_users') || promptText.includes('target_users')) {
      return this._prefillPayload();
    }

    // project-context-generator.md uniquely contains "contextMarkdown" as an output field.
    if (agentText.includes('contextmarkdown') || agentText.includes('context generator')) {
      return this._contextPayload();
    }

    // Generic fallback
    return { result: 'Mock JSON response', success: true };
  }

  /** generate — return a mock sponsor-call document */
  async generate(prompt, maxTokens = 256, systemInstructions = null) {
    this._track(prompt);

    const promptText = (prompt || '').toLowerCase();

    // Document generation
    if (promptText.includes('sponsor') || promptText.includes('project brief') || promptText.includes('enhance')) {
      return `# Sponsor Call — Test Task Manager

## Mission Statement
Build a test task manager app to help teams manage development tasks efficiently.

## Initial Scope & Key Features
MVP with task creation and basic authentication.

## Target Users
Developers and project teams managing software development tasks.

## Deployment Target
Local development environment with Docker Compose. Ready to migrate to AWS ECS when needed.

## Technical Considerations
Node.js/Express.js backend, React 18 + Vite frontend, PostgreSQL 16 in Docker.

## Security & Compliance
JWT authentication with refresh tokens, bcrypt password hashing, HTTPS in production.

## Architecture
Local Hybrid Stack: backend on localhost, database in Docker for isolation.

---
*Generated by AVC mock provider for E2E testing*
`;
    }

    // Improvement pass (iterative validation improve step)
    if (promptText.includes('improve') || promptText.includes('enhancement')) {
      return `# Sponsor Call — Test Task Manager (Improved)

## Mission Statement
Build a comprehensive test task manager app for development teams.

## Initial Scope & Key Features
MVP with task creation, assignment, and basic JWT authentication.

## Target Users
Software development teams and individual developers.

## Deployment Target
Local development with Docker Compose, production on AWS ECS.

## Technical Considerations
Express.js/Node.js backend, React 18 frontend, PostgreSQL 16 in Docker container.

## Security & Compliance
JWT + bcrypt authentication, OWASP security practices.

---
*Improved by AVC mock provider for E2E testing*
`;
    }

    return 'Mock text response from AVC E2E mock provider.';
  }

  /** generateText — alias for generate (used by migration guide generator) */
  async generateText(prompt, agentInstructions = null) {
    return this.generate(prompt, 4096, agentInstructions);
  }
}
|
|
@@ -0,0 +1,454 @@
|
|
|
1
|
+
import OpenAI from 'openai';
|
|
2
|
+
import { jsonrepair } from 'jsonrepair';
|
|
3
|
+
import { LLMProvider } from './llm-provider.js';
|
|
4
|
+
import { getMaxTokensForModel } from './llm-token-limits.js';
|
|
5
|
+
import fs from 'node:fs/promises';
|
|
6
|
+
import { existsSync } from 'node:fs';
|
|
7
|
+
import path from 'node:path';
|
|
8
|
+
|
|
9
|
+
export class OpenAIProvider extends LLMProvider {
|
|
10
|
+
  /**
   * @param {string} [model='gpt-5.2-chat-latest'] - OpenAI model identifier.
   * @param {string} [reasoningEffort='medium'] - Effort level forwarded as
   *   `reasoning.effort` for the pro/codex Responses-API models.
   */
  constructor(model = 'gpt-5.2-chat-latest', reasoningEffort = 'medium') {
    super('openai', model);
    this.reasoningEffort = reasoningEffort;
  }
|
|
14
|
+
|
|
15
|
+
_createClient() {
|
|
16
|
+
if (process.env.OPENAI_AUTH_MODE === 'oauth') {
|
|
17
|
+
const oauthPath = path.join(process.cwd(), '.avc', 'openai-oauth.json');
|
|
18
|
+
// Only use OAuth mode if the token file actually exists — avoids per-call ENOENT warnings
|
|
19
|
+
if (existsSync(oauthPath)) return { mode: 'oauth' };
|
|
20
|
+
}
|
|
21
|
+
const apiKey = process.env.OPENAI_API_KEY;
|
|
22
|
+
if (!apiKey) throw new Error('OPENAI_API_KEY not set. Add it to your .env file.');
|
|
23
|
+
return new OpenAI({ apiKey });
|
|
24
|
+
}
|
|
25
|
+
|
|
26
|
+
  /**
   * Load OAuth tokens from .avc/openai-oauth.json, refreshing if close to expiry.
   *
   * Token file shape as read/written here: { access, refresh, expires, accountId },
   * where `expires` is an epoch-milliseconds timestamp.
   *
   * @returns {Promise<{access: string, accountId: string}>} bearer token + account id
   * @throws when the token file is missing/unreadable or the refresh request fails
   */
  async _loadOAuthTokens() {
    const oauthPath = path.join(process.cwd(), '.avc', 'openai-oauth.json');
    const raw = await fs.readFile(oauthPath, 'utf8');
    let tokens = JSON.parse(raw);

    // Refresh if within 60s of expiry
    if (tokens.expires - Date.now() < 60_000) {
      const body = new URLSearchParams({
        grant_type: 'refresh_token',
        // NOTE(review): hard-coded OAuth client id — presumably the public
        // ChatGPT/Codex app id; confirm against the oauth route in the server.
        client_id: 'app_EMoamEEZ73f0CkXaXp7hrann',
        refresh_token: tokens.refresh,
      });
      const resp = await fetch('https://auth.openai.com/oauth/token', {
        method: 'POST',
        headers: { 'Content-Type': 'application/x-www-form-urlencoded' },
        body: body.toString(),
      });
      if (!resp.ok) throw new Error(`OAuth token refresh failed: ${resp.status}`);
      const refreshed = await resp.json();
      tokens = {
        access: refreshed.access_token,
        // The server may rotate the refresh token; keep the old one if omitted.
        refresh: refreshed.refresh_token || tokens.refresh,
        expires: Date.now() + (refreshed.expires_in || 3600) * 1000,
        accountId: tokens.accountId,
      };
      // Persist the refreshed tokens so later calls/processes reuse them.
      await fs.writeFile(oauthPath, JSON.stringify(tokens, null, 2), 'utf8');
    }

    return { access: tokens.access, accountId: tokens.accountId };
  }
|
|
59
|
+
|
|
60
|
+
  /**
   * Call the ChatGPT Codex endpoint using OAuth bearer token.
   *
   * The endpoint streams SSE even though we read the body in one shot; the
   * response text is reassembled from `response.output_text.delta` events,
   * preferring the complete text carried by `response.output_text.done` /
   * the final `response.done` event when present.
   *
   * @param {string} prompt - user content
   * @param {string|null} agentInstructions - system-style instructions
   * @returns {Promise<string>} the assistant's full text output
   * @throws {Error} on a non-2xx HTTP status (body text included in message)
   */
  async _callChatGPTCodex(prompt, agentInstructions) {
    const { access, accountId } = await this._loadOAuthTokens();

    const t0 = Date.now();
    const resp = await fetch('https://chatgpt.com/backend-api/codex/responses', {
      method: 'POST',
      headers: {
        'Authorization': `Bearer ${access}`,
        'chatgpt-account-id': accountId,
        'Content-Type': 'application/json',
        'OpenAI-Beta': 'responses=experimental',
        'accept': 'application/json',
      },
      body: JSON.stringify({
        model: this.model,
        instructions: agentInstructions || 'You are a helpful assistant.',
        input: [{ role: 'user', content: prompt }],
        store: false,
        stream: true,
      }),
    });

    if (!resp.ok) {
      const raw = await resp.text();
      throw new Error(`ChatGPT Codex API error (${resp.status}): ${raw}`);
    }

    // Parse SSE stream — accumulate text from delta events; use response.done for final text + usage
    const body = await resp.text();
    let text = '';
    let finalEvent = null;
    for (const line of body.split('\n')) {
      // SSE data lines are prefixed "data: "; everything else (event:, blank) is skipped.
      if (!line.startsWith('data: ')) continue;
      const chunk = line.slice(6).trim();
      if (chunk === '[DONE]') break;
      try {
        const event = JSON.parse(chunk);
        if (event.type === 'response.output_text.delta') {
          text += event.delta ?? '';
        } else if (event.type === 'response.output_text.done') {
          text = event.text ?? text; // prefer the complete text when available
        } else if (event.type === 'response.done' || event.type === 'response.completed') {
          finalEvent = event.response ?? event;
          // response.done may carry output_text if delta events were absent
          if (!text) {
            text = finalEvent?.output_text ?? finalEvent?.output?.[0]?.content?.[0]?.text ?? '';
          }
          break;
        }
      } catch { /* skip malformed lines */ }
    }
    // Usage stats only exist when a final event was seen; _trackTokens gets null otherwise.
    const usage = finalEvent?.usage ?? null;

    this._trackTokens(usage, {
      prompt,
      agentInstructions: agentInstructions ?? null,
      response: text,
      elapsed: Date.now() - t0,
    });

    return text;
  }
|
|
125
|
+
|
|
126
|
+
/**
|
|
127
|
+
* Determine if model uses Responses API instead of Chat Completions API
|
|
128
|
+
* Models that use Responses API: gpt-5.2-pro, gpt-5.2-codex
|
|
129
|
+
*/
|
|
130
|
+
_usesResponsesAPI() {
|
|
131
|
+
const responsesAPIModels = ['gpt-5.2-pro', 'gpt-5.2-codex'];
|
|
132
|
+
return responsesAPIModels.includes(this.model);
|
|
133
|
+
}
|
|
134
|
+
|
|
135
|
+
/**
|
|
136
|
+
* Call using Chat Completions API (standard models)
|
|
137
|
+
*/
|
|
138
|
+
async _callChatCompletions(prompt, maxTokens, systemInstructions) {
|
|
139
|
+
const messages = [];
|
|
140
|
+
|
|
141
|
+
// OpenAI uses message array - system instructions go first as system role
|
|
142
|
+
if (systemInstructions) {
|
|
143
|
+
messages.push({ role: 'system', content: systemInstructions });
|
|
144
|
+
}
|
|
145
|
+
|
|
146
|
+
messages.push({ role: 'user', content: prompt });
|
|
147
|
+
|
|
148
|
+
const params = {
|
|
149
|
+
model: this.model,
|
|
150
|
+
messages
|
|
151
|
+
};
|
|
152
|
+
|
|
153
|
+
// max_completion_tokens is the modern unified parameter; max_tokens is only for legacy gpt-3.5-turbo
|
|
154
|
+
if (this.model.startsWith('gpt-3.5')) {
|
|
155
|
+
params.max_tokens = maxTokens;
|
|
156
|
+
} else {
|
|
157
|
+
params.max_completion_tokens = maxTokens;
|
|
158
|
+
}
|
|
159
|
+
|
|
160
|
+
const response = await this._client.chat.completions.create(params);
|
|
161
|
+
|
|
162
|
+
this._trackTokens(response.usage);
|
|
163
|
+
return response.choices[0].message.content;
|
|
164
|
+
}
|
|
165
|
+
|
|
166
|
+
/**
|
|
167
|
+
* Call using Responses API (pro/codex models)
|
|
168
|
+
* @param {string} prompt
|
|
169
|
+
* @param {string|null} systemInstructions
|
|
170
|
+
* @param {Object|null} [promptPayload] - Partial payload { prompt, agentInstructions } to log
|
|
171
|
+
*/
|
|
172
|
+
async _callResponsesAPI(prompt, systemInstructions, promptPayload = null) {
|
|
173
|
+
// Combine system instructions with prompt
|
|
174
|
+
const fullInput = systemInstructions
|
|
175
|
+
? `${systemInstructions}\n\n${prompt}`
|
|
176
|
+
: prompt;
|
|
177
|
+
|
|
178
|
+
const params = {
|
|
179
|
+
model: this.model,
|
|
180
|
+
input: fullInput
|
|
181
|
+
};
|
|
182
|
+
|
|
183
|
+
// Add reasoning effort for models that support it
|
|
184
|
+
if (this.model === 'gpt-5.2-codex' || this.model === 'gpt-5.2-pro') {
|
|
185
|
+
params.reasoning = { effort: this.reasoningEffort };
|
|
186
|
+
}
|
|
187
|
+
|
|
188
|
+
const _t0 = Date.now();
|
|
189
|
+
const response = await this._withRetry(
|
|
190
|
+
() => this._client.responses.create(params),
|
|
191
|
+
'Responses API call'
|
|
192
|
+
);
|
|
193
|
+
const _elapsed = Date.now() - _t0;
|
|
194
|
+
|
|
195
|
+
const text = response.output_text;
|
|
196
|
+
|
|
197
|
+
// Track tokens if usage data is available
|
|
198
|
+
if (response.usage) {
|
|
199
|
+
const finalPayload = promptPayload ? {
|
|
200
|
+
...promptPayload,
|
|
201
|
+
response: text,
|
|
202
|
+
elapsed: _elapsed,
|
|
203
|
+
} : null;
|
|
204
|
+
this._trackTokens(response.usage, finalPayload);
|
|
205
|
+
}
|
|
206
|
+
|
|
207
|
+
return text;
|
|
208
|
+
}
|
|
209
|
+
|
|
210
|
+
async _callProvider(prompt, maxTokens, systemInstructions) {
|
|
211
|
+
// OAuth mode: all calls go through the ChatGPT Codex endpoint
|
|
212
|
+
if (this._client?.mode === 'oauth') {
|
|
213
|
+
return await this._callChatGPTCodex(prompt, systemInstructions);
|
|
214
|
+
}
|
|
215
|
+
if (this._usesResponsesAPI()) {
|
|
216
|
+
return await this._callResponsesAPI(prompt, systemInstructions);
|
|
217
|
+
} else {
|
|
218
|
+
return await this._callChatCompletions(prompt, maxTokens, systemInstructions);
|
|
219
|
+
}
|
|
220
|
+
}
|
|
221
|
+
|
|
222
|
+
/** True when oauth mode is active AND fallback to api-key is enabled AND key is present */
|
|
223
|
+
_hasFallback() {
|
|
224
|
+
return process.env.OPENAI_AUTH_MODE === 'oauth'
|
|
225
|
+
&& process.env.OPENAI_OAUTH_FALLBACK === 'true'
|
|
226
|
+
&& !!process.env.OPENAI_API_KEY;
|
|
227
|
+
}
|
|
228
|
+
|
|
229
|
+
/** Create a plain OpenAI SDK client using OPENAI_API_KEY (for fallback) */
|
|
230
|
+
_createApiKeyClient() {
|
|
231
|
+
return new OpenAI({ apiKey: process.env.OPENAI_API_KEY });
|
|
232
|
+
}
|
|
233
|
+
|
|
234
|
+
async generateJSON(prompt, agentInstructions = null, cachedContext = null) {
|
|
235
|
+
if (!this._client) {
|
|
236
|
+
this._client = this._createClient();
|
|
237
|
+
}
|
|
238
|
+
|
|
239
|
+
// OAuth path — route through ChatGPT Codex endpoint
|
|
240
|
+
if (this._client?.mode === 'oauth') {
|
|
241
|
+
try {
|
|
242
|
+
const jsonInstructions = (agentInstructions ? agentInstructions + '\n\n' : '')
|
|
243
|
+
+ 'You are a helpful assistant that always returns valid JSON. Your response must be a valid JSON object or array, nothing else.';
|
|
244
|
+
const text = await this._callChatGPTCodex(prompt, jsonInstructions);
|
|
245
|
+
let jsonStr = text.trim();
|
|
246
|
+
if (jsonStr.startsWith('```')) {
|
|
247
|
+
jsonStr = jsonStr.replace(/^```(?:json)?\s*\n?/, '').replace(/\n?\s*```\s*$/, '').trim();
|
|
248
|
+
}
|
|
249
|
+
try {
|
|
250
|
+
return JSON.parse(jsonStr);
|
|
251
|
+
} catch (firstError) {
|
|
252
|
+
if (jsonStr.startsWith('{') || jsonStr.startsWith('[')) {
|
|
253
|
+
try { return JSON.parse(jsonrepair(jsonStr)); } catch { /* fall through */ }
|
|
254
|
+
}
|
|
255
|
+
throw new Error(`Failed to parse JSON response: ${firstError.message}\n\nResponse was:\n${text}`);
|
|
256
|
+
}
|
|
257
|
+
} catch (oauthErr) {
|
|
258
|
+
if (!this._hasFallback()) throw oauthErr;
|
|
259
|
+
console.warn(`[openai] OAuth call failed, falling back to API key: ${oauthErr.message}`);
|
|
260
|
+
this._client = this._createApiKeyClient();
|
|
261
|
+
// fall through to standard paths below
|
|
262
|
+
}
|
|
263
|
+
}
|
|
264
|
+
|
|
265
|
+
const fullPrompt = agentInstructions ? `${agentInstructions}\n\n${prompt}` : prompt;
|
|
266
|
+
|
|
267
|
+
if (this._usesResponsesAPI()) {
|
|
268
|
+
// Responses API: Use system instructions to enforce JSON
|
|
269
|
+
const systemInstructions = 'You are a helpful assistant that always returns valid JSON. Your response must be a valid JSON object or array, nothing else.';
|
|
270
|
+
const _rApiPayload = this._promptLogger ? { prompt: fullPrompt, agentInstructions: agentInstructions ?? null } : null;
|
|
271
|
+
const response = await this._callResponsesAPI(fullPrompt, systemInstructions, _rApiPayload);
|
|
272
|
+
|
|
273
|
+
// Parse and return JSON
|
|
274
|
+
let jsonStr = response.trim();
|
|
275
|
+
if (jsonStr.startsWith('```')) {
|
|
276
|
+
jsonStr = jsonStr.replace(/^```(?:json)?\s*\n?/, '');
|
|
277
|
+
jsonStr = jsonStr.replace(/\n?\s*```\s*$/, '');
|
|
278
|
+
jsonStr = jsonStr.trim();
|
|
279
|
+
}
|
|
280
|
+
|
|
281
|
+
try {
|
|
282
|
+
return JSON.parse(jsonStr);
|
|
283
|
+
} catch (firstError) {
|
|
284
|
+
if (jsonStr.startsWith('{') || jsonStr.startsWith('[')) {
|
|
285
|
+
try {
|
|
286
|
+
return JSON.parse(jsonrepair(jsonStr));
|
|
287
|
+
} catch { /* fall through to throw */ }
|
|
288
|
+
}
|
|
289
|
+
throw new Error(`Failed to parse JSON response: ${firstError.message}\n\nResponse was:\n${response}`);
|
|
290
|
+
}
|
|
291
|
+
} else {
|
|
292
|
+
// Chat Completions API: Use native JSON mode.
|
|
293
|
+
// Build system message as: JSON_SYSTEM + agentInstructions + cachedContext.
|
|
294
|
+
// Putting agentInstructions in the system message (not the user message) makes the full
|
|
295
|
+
// prefix eligible for OpenAI automatic prefix caching — identical system prefixes across
|
|
296
|
+
// repeated calls of the same stage type get a 90% discount after the first 1024 tokens.
|
|
297
|
+
const JSON_SYSTEM = 'You are a helpful assistant that always returns valid JSON. Your response must be a valid JSON object or array, nothing else.';
|
|
298
|
+
const systemParts = [JSON_SYSTEM];
|
|
299
|
+
if (agentInstructions) systemParts.push(agentInstructions);
|
|
300
|
+
if (cachedContext) systemParts.push(`---\n\n${cachedContext}`);
|
|
301
|
+
const systemContent = systemParts.join('\n\n');
|
|
302
|
+
|
|
303
|
+
const messages = [
|
|
304
|
+
{ role: 'system', content: systemContent },
|
|
305
|
+
{ role: 'user', content: prompt },
|
|
306
|
+
];
|
|
307
|
+
|
|
308
|
+
const params = {
|
|
309
|
+
model: this.model,
|
|
310
|
+
messages,
|
|
311
|
+
};
|
|
312
|
+
|
|
313
|
+
// Use model-specific maximum tokens
|
|
314
|
+
const maxTokens = getMaxTokensForModel(this.model);
|
|
315
|
+
|
|
316
|
+
// max_completion_tokens is the modern unified parameter; max_tokens is only for legacy gpt-3.5-turbo
|
|
317
|
+
if (this.model.startsWith('gpt-3.5')) {
|
|
318
|
+
params.max_tokens = maxTokens;
|
|
319
|
+
} else {
|
|
320
|
+
params.max_completion_tokens = maxTokens;
|
|
321
|
+
}
|
|
322
|
+
|
|
323
|
+
// Enable JSON mode if model supports it (GPT-4+)
|
|
324
|
+
if (this.model.startsWith('gpt-4') || this.model.startsWith('gpt-5') || this.model.startsWith('o')) {
|
|
325
|
+
params.response_format = { type: 'json_object' };
|
|
326
|
+
}
|
|
327
|
+
|
|
328
|
+
// Extended 24-hour cache retention — free on gpt-5.x and gpt-4.1+ families.
|
|
329
|
+
// Keeps the system-message prefix in cache across long ceremony runs (>1 hr).
|
|
330
|
+
if (this.model.startsWith('gpt-5') || this.model.startsWith('gpt-4.1')) {
|
|
331
|
+
params.prompt_cache_retention = '24h';
|
|
332
|
+
}
|
|
333
|
+
|
|
334
|
+
const _t0Json = Date.now();
|
|
335
|
+
const response = await this._withRetry(
|
|
336
|
+
() => this._client.chat.completions.create(params),
|
|
337
|
+
'JSON generation (Chat Completions)'
|
|
338
|
+
);
|
|
339
|
+
|
|
340
|
+
const choice = response.choices[0];
|
|
341
|
+
const content = choice.message.content;
|
|
342
|
+
|
|
343
|
+
// Detect output truncation — json_object mode returns null/empty when cut off at token limit
|
|
344
|
+
if (choice.finish_reason === 'length' || !content) {
|
|
345
|
+
const maxTok = getMaxTokensForModel(this.model);
|
|
346
|
+
const usedOut = response.usage?.completion_tokens ?? '?';
|
|
347
|
+
throw new Error(
|
|
348
|
+
`Response truncated at token limit (finish_reason=length). ` +
|
|
349
|
+
`Model: ${this.model}, limit: ${maxTok}, used: ${usedOut}. ` +
|
|
350
|
+
`Increase max tokens for this model in llm-token-limits.js or reduce prompt size.`
|
|
351
|
+
);
|
|
352
|
+
}
|
|
353
|
+
|
|
354
|
+
this._trackTokens(response.usage, {
|
|
355
|
+
prompt: fullPrompt,
|
|
356
|
+
agentInstructions: agentInstructions ?? null,
|
|
357
|
+
response: content,
|
|
358
|
+
elapsed: Date.now() - _t0Json,
|
|
359
|
+
});
|
|
360
|
+
|
|
361
|
+
// Strip markdown code fences if present (defense-in-depth)
|
|
362
|
+
let jsonStr = content.trim();
|
|
363
|
+
if (jsonStr.startsWith('```')) {
|
|
364
|
+
jsonStr = jsonStr.replace(/^```(?:json)?\s*\n?/, '');
|
|
365
|
+
jsonStr = jsonStr.replace(/\n?\s*```\s*$/, '');
|
|
366
|
+
jsonStr = jsonStr.trim();
|
|
367
|
+
}
|
|
368
|
+
|
|
369
|
+
try {
|
|
370
|
+
return JSON.parse(jsonStr);
|
|
371
|
+
} catch (firstError) {
|
|
372
|
+
if (jsonStr.startsWith('{') || jsonStr.startsWith('[')) {
|
|
373
|
+
try {
|
|
374
|
+
return JSON.parse(jsonrepair(jsonStr));
|
|
375
|
+
} catch { /* fall through to throw */ }
|
|
376
|
+
}
|
|
377
|
+
throw new Error(`Failed to parse JSON response: ${firstError.message}\n\nResponse was:\n${content}`);
|
|
378
|
+
}
|
|
379
|
+
}
|
|
380
|
+
}
|
|
381
|
+
|
|
382
|
+
async generateText(prompt, agentInstructions = null, cachedContext = null) {
|
|
383
|
+
if (!this._client) {
|
|
384
|
+
this._client = this._createClient();
|
|
385
|
+
}
|
|
386
|
+
|
|
387
|
+
// OAuth path — route through ChatGPT Codex endpoint
|
|
388
|
+
if (this._client?.mode === 'oauth') {
|
|
389
|
+
try {
|
|
390
|
+
return await this._callChatGPTCodex(prompt, agentInstructions);
|
|
391
|
+
} catch (oauthErr) {
|
|
392
|
+
if (!this._hasFallback()) throw oauthErr;
|
|
393
|
+
console.warn(`[openai] OAuth call failed, falling back to API key: ${oauthErr.message}`);
|
|
394
|
+
this._client = this._createApiKeyClient();
|
|
395
|
+
// fall through to standard paths below
|
|
396
|
+
}
|
|
397
|
+
}
|
|
398
|
+
|
|
399
|
+
const fullPrompt = agentInstructions ? `${agentInstructions}\n\n${prompt}` : prompt;
|
|
400
|
+
|
|
401
|
+
if (this._usesResponsesAPI()) {
|
|
402
|
+
// Responses API
|
|
403
|
+
const _rApiPayload = this._promptLogger ? { prompt: fullPrompt, agentInstructions: agentInstructions ?? null } : null;
|
|
404
|
+
return await this._callResponsesAPI(fullPrompt, null, _rApiPayload);
|
|
405
|
+
} else {
|
|
406
|
+
// Chat Completions API.
|
|
407
|
+
// Build system message as agentInstructions + cachedContext so both are eligible for
|
|
408
|
+
// OpenAI automatic prefix caching (90% discount when system prefix is stable across calls).
|
|
409
|
+
const systemParts = [];
|
|
410
|
+
if (agentInstructions) systemParts.push(agentInstructions);
|
|
411
|
+
if (cachedContext) systemParts.push(cachedContext);
|
|
412
|
+
const messages = [];
|
|
413
|
+
if (systemParts.length > 0) {
|
|
414
|
+
messages.push({ role: 'system', content: systemParts.join('\n\n') });
|
|
415
|
+
}
|
|
416
|
+
messages.push({ role: 'user', content: prompt });
|
|
417
|
+
|
|
418
|
+
const params = {
|
|
419
|
+
model: this.model,
|
|
420
|
+
messages,
|
|
421
|
+
};
|
|
422
|
+
|
|
423
|
+
// Use model-specific maximum tokens
|
|
424
|
+
const maxTokens = getMaxTokensForModel(this.model);
|
|
425
|
+
|
|
426
|
+
// max_completion_tokens is the modern unified parameter; max_tokens is only for legacy gpt-3.5-turbo
|
|
427
|
+
if (this.model.startsWith('gpt-3.5')) {
|
|
428
|
+
params.max_tokens = maxTokens;
|
|
429
|
+
} else {
|
|
430
|
+
params.max_completion_tokens = maxTokens;
|
|
431
|
+
}
|
|
432
|
+
|
|
433
|
+
// Extended 24-hour cache retention — free on gpt-5.x and gpt-4.1+ families.
|
|
434
|
+
if (this.model.startsWith('gpt-5') || this.model.startsWith('gpt-4.1')) {
|
|
435
|
+
params.prompt_cache_retention = '24h';
|
|
436
|
+
}
|
|
437
|
+
|
|
438
|
+
const _t0Text = Date.now();
|
|
439
|
+
const response = await this._withRetry(
|
|
440
|
+
() => this._client.chat.completions.create(params),
|
|
441
|
+
'Text generation (Chat Completions)'
|
|
442
|
+
);
|
|
443
|
+
|
|
444
|
+
const textContent = response.choices[0].message.content;
|
|
445
|
+
this._trackTokens(response.usage, {
|
|
446
|
+
prompt: fullPrompt,
|
|
447
|
+
agentInstructions: agentInstructions ?? null,
|
|
448
|
+
response: textContent,
|
|
449
|
+
elapsed: Date.now() - _t0Text,
|
|
450
|
+
});
|
|
451
|
+
return textContent;
|
|
452
|
+
}
|
|
453
|
+
}
|
|
454
|
+
}
|