@sage-protocol/cli 0.4.0 → 0.4.1
This diff shows the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
- package/dist/cli/browser-wallet-integration.js +0 -1
- package/dist/cli/cast-wallet-manager.js +0 -1
- package/dist/cli/commands/interview.js +149 -0
- package/dist/cli/commands/personal.js +138 -79
- package/dist/cli/commands/stake-status.js +0 -2
- package/dist/cli/config.js +28 -8
- package/dist/cli/governance-manager.js +28 -19
- package/dist/cli/index.js +32 -8
- package/dist/cli/library-manager.js +16 -6
- package/dist/cli/mcp-server-stdio.js +549 -0
- package/dist/cli/mcp-server.js +4 -30
- package/dist/cli/metamask-integration.js +0 -1
- package/dist/cli/privy-wallet-manager.js +2 -2
- package/dist/cli/prompt-manager.js +0 -1
- package/dist/cli/services/doctor/fixers.js +1 -1
- package/dist/cli/services/mcp/env-loader.js +2 -0
- package/dist/cli/services/mcp/quick-start.js +14 -15
- package/dist/cli/services/mcp/sage-tool-registry.js +330 -0
- package/dist/cli/services/mcp/tool-args-validator.js +31 -0
- package/dist/cli/services/metaprompt/anthropic-client.js +87 -0
- package/dist/cli/services/metaprompt/interview-driver.js +161 -0
- package/dist/cli/services/metaprompt/model-client.js +49 -0
- package/dist/cli/services/metaprompt/openai-client.js +67 -0
- package/dist/cli/services/metaprompt/persistence.js +86 -0
- package/dist/cli/services/metaprompt/prompt-builder.js +186 -0
- package/dist/cli/services/metaprompt/session.js +18 -80
- package/dist/cli/services/metaprompt/slot-planner.js +115 -0
- package/dist/cli/services/metaprompt/templates.json +130 -0
- package/dist/cli/subdao.js +0 -3
- package/dist/cli/sxxx-manager.js +0 -1
- package/dist/cli/utils/tx-wait.js +0 -3
- package/dist/cli/wallet-manager.js +18 -19
- package/dist/cli/walletconnect-integration.js +0 -1
- package/dist/cli/wizard-manager.js +0 -1
- package/package.json +3 -1
package/dist/cli/services/metaprompt/interview-driver.js
@@ -0,0 +1,161 @@
+const { createModelClient } = require('./model-client');
+const SlotPlanner = require('./slot-planner');
+const PromptBuilder = require('./prompt-builder');
+
+class InterviewDriver {
+  /**
+   * @param {object} config CLI config object
+   * @param {object} [options]
+   * @param {string} [options.templateKey]
+   * @param {string} [options.initialDescription]
+   * @param {string} [options.provider]
+   * @param {string} [options.model]
+   * @param {string} [options.apiKey]
+   */
+  constructor(config, options = {}) {
+    this.config = config;
+    this.client = createModelClient(config, {
+      provider: options.provider,
+      model: options.model,
+      apiKey: options.apiKey,
+    });
+    this.planner = new SlotPlanner(config, {
+      client: this.client,
+      provider: options.provider,
+      model: options.model,
+      apiKey: options.apiKey,
+    });
+    this.builder = new PromptBuilder();
+
+    this.templateKey = options.templateKey || 'custom';
+    this.initialDescription = options.initialDescription || '';
+    this.transcript = [];
+    this.slots = []; // The plan
+    this.answers = {}; // The state of filled slots
+    this.isDone = false;
+  }
+  /**
+   * Initialize the driver. Call this before using getNextQuestion().
+   * @param {object} [precomputedState] Optional pre-existing state to restore
+   * @param {Array} [precomputedState.slots] Pre-computed slots (skips LLM planning)
+   * @param {object} [precomputedState.answers] Pre-filled answers
+   * @param {Array} [precomputedState.transcript] Prior transcript
+   */
+  async init(precomputedState = null) {
+    // If we have precomputed state, restore it (skip expensive LLM planning)
+    if (precomputedState && precomputedState.slots && precomputedState.slots.length > 0) {
+      this.slots = precomputedState.slots;
+      this.answers = precomputedState.answers || {};
+      this.transcript = precomputedState.transcript || [];
+      return;
+    }
+
+    // 1. Generate Plan (uses LLM for custom templates, static for built-ins)
+    this.slots = await this.planner.planSlots(this.templateKey, this.initialDescription);
+
+    // 2. Check if initialDescription fills any slots immediately (one-shot extraction)
+    if (this.initialDescription) {
+      await this.#extractFromText(this.initialDescription);
+    }
+  }
+
+  async getNextQuestion() {
+    if (this.isDone) return null;
+
+    // 1. Find the highest-priority slot that is required and empty
+    const nextSlot = this.slots.find(s => s.required && !this.answers[s.key]);
+
+    if (!nextSlot) {
+      // All required slots filled. Check optional high-priority slots.
+      const optionalSlot = this.slots.find(s => !this.answers[s.key] && s.priority <= 2);
+      if (optionalSlot) {
+        return await this.#generateQuestion(optionalSlot);
+      }
+
+      // If we are here, we are effectively done with the structured part.
+      this.isDone = true;
+      return null;
+    }
+
+    return await this.#generateQuestion(nextSlot);
+  }
+
+  async processAnswer(answer) {
+    this.transcript.push({ role: 'user', content: answer });
+    await this.#extractFromText(answer); // Extract whatever we can from the answer
+  }
+
+  generateSystemPrompt() {
+    return this.builder.buildSystemPrompt(this.templateKey, this.slots, this.answers);
+  }
+
+  // --- Private Helpers ---
+
+  async #generateQuestion(slot) {
+    const systemPrompt = `
+You are an expert interviewer. Your goal is to extract specific information from the user to build an AI persona.
+Current Target: "${slot.label}" (${slot.description})
+Context So Far: ${JSON.stringify(this.answers)}
+
+Generate a single, clear, friendly question to ask the user for this information.
+Do not preface it with "Question:" or similar. Just ask.
+`;
+    const messages = [
+      { role: 'system', content: systemPrompt },
+      { role: 'user', content: 'Ask the question.' }
+    ];
+
+    const question = await this.client.complete(messages);
+    this.transcript.push({ role: 'assistant', content: question });
+    return question;
+  }
+
+  async #extractFromText(text) {
+    // Use the LLM to map text to slots.
+    // We send the schema + current answers + text.
+    // Use JSON.stringify for the text to handle quotes, braces, and special chars safely.
+    const emptySlots = this.slots.filter(s => !this.answers[s.key]);
+    if (emptySlots.length === 0) return;
+
+    const systemPrompt = `
+You are a data extraction engine.
+Slots Schema: ${JSON.stringify(emptySlots)}
+(Only extract for empty slots, but update existing if new info is better).
+
+Input Text: ${JSON.stringify(text)}
+
+Return a JSON object where keys match the slot keys and values are the extracted information.
+If no information is found for a slot, omit the key.
+CRITICAL: Return ONLY a valid JSON object, no markdown, no explanation.
+`;
+
+    const messages = [
+      { role: 'system', content: systemPrompt },
+      { role: 'user', content: 'Extract data from the input text.' }
+    ];
+
+    try {
+      const response = await this.client.complete(messages);
+      // Try to find a JSON object in the response
+      const jsonMatch = response.match(/\{[\s\S]*\}/);
+      if (!jsonMatch) return;
+
+      const updates = JSON.parse(jsonMatch[0]);
+
+      // Update state
+      for (const [key, val] of Object.entries(updates)) {
+        if (val && (typeof val === 'string' || typeof val === 'object')) {
+          this.answers[key] = typeof val === 'object' ? JSON.stringify(val) : val;
+        }
+      }
+    } catch (e) {
+      // Silent fail - extraction is best-effort
+      if (process.env.SAGE_VERBOSE) {
+        console.warn('Extraction failed:', e.message);
+      }
+    }
+  }
+}
+
+module.exports = InterviewDriver;
+
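Taken together, `init()`, `getNextQuestion()`, and `processAnswer()` form a pump loop: plan the slots once, ask for the highest-priority unfilled slot, and run best-effort extraction on every answer (one answer can fill several slots, which shortens the interview). A minimal sketch of a caller, assuming a Node readline REPL; the real wiring lives in the new `commands/interview.js`, which is not shown in this excerpt:

```js
// Hypothetical caller for InterviewDriver (illustrative only).
const readline = require('readline/promises');
const InterviewDriver = require('./interview-driver');

async function runInterview(config) {
  const driver = new InterviewDriver(config, {
    templateKey: 'custom',
    initialDescription: 'A code-review agent for a TypeScript monorepo',
  });
  await driver.init(); // plans slots, then pre-fills any the description already answers

  const rl = readline.createInterface({ input: process.stdin, output: process.stdout });
  let question;
  while ((question = await driver.getNextQuestion()) !== null) {
    const answer = await rl.question(`${question}\n> `);
    await driver.processAnswer(answer); // extraction may fill several slots at once
  }
  rl.close();

  return driver.generateSystemPrompt(); // XML-structured prompt from the filled slots
}
```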
package/dist/cli/services/metaprompt/model-client.js
@@ -0,0 +1,49 @@
+const OpenAIClient = require('./openai-client');
+const AnthropicClient = require('./anthropic-client');
+
+/**
+ * Factory to create the appropriate ModelClient based on available keys.
+ * @param {object} config The config object (from require('../../config'))
+ * @param {object} [overrides] Optional overrides for testing or CLI flags
+ * @param {string} [overrides.provider] 'openai' | 'anthropic'
+ * @param {string} [overrides.model]
+ * @param {string} [overrides.apiKey]
+ * @returns {OpenAIClient|AnthropicClient}
+ */
+function createModelClient(config, overrides = {}) {
+  // 1. Explicit override
+  if (overrides.provider && overrides.apiKey) {
+    if (overrides.provider === 'anthropic') {
+      return new AnthropicClient({ apiKey: overrides.apiKey, model: overrides.model });
+    }
+    if (overrides.provider === 'openai') {
+      return new OpenAIClient({ apiKey: overrides.apiKey, model: overrides.model });
+    }
+  }
+
+  // 2. Config-based resolution
+  const aiConfig = config.readAIConfig();
+
+  // Prefer the explicit provider override if its key is in config
+  if (overrides.provider === 'anthropic' && aiConfig.anthropicApiKey) {
+    return new AnthropicClient({ apiKey: aiConfig.anthropicApiKey, model: overrides.model });
+  }
+  if (overrides.provider === 'openai' && aiConfig.openaiApiKey) {
+    return new OpenAIClient({ apiKey: aiConfig.openaiApiKey, model: overrides.model });
+  }
+
+  // 3. Default preference: Anthropic > OpenAI
+  if (aiConfig.anthropicApiKey) {
+    return new AnthropicClient({ apiKey: aiConfig.anthropicApiKey, model: overrides.model });
+  }
+  if (aiConfig.openaiApiKey) {
+    return new OpenAIClient({ apiKey: aiConfig.openaiApiKey, model: overrides.model });
+  }
+
+  throw new Error(
+    'No AI keys found. Please run `sage config ai set --provider anthropic --key <sk-ant-...>` or set ANTHROPIC_API_KEY.'
+  );
+}
+
+module.exports = { createModelClient };
+
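The factory resolves in three steps: an explicit provider-plus-key override wins, then a provider override backed by a configured key, then whichever key exists in config, with Anthropic preferred over OpenAI. A sketch of both paths, assuming `config.readAIConfig()` returns `{ anthropicApiKey, openaiApiKey }` as the code implies:

```js
const { createModelClient } = require('./model-client');
const config = require('../../config');

// Fully explicit: provider + apiKey bypass config resolution entirely.
const forced = createModelClient(config, {
  provider: 'openai',
  apiKey: process.env.OPENAI_API_KEY,
  model: 'gpt-4o-mini', // illustrative model id
});

// No overrides: falls back to whichever key `sage config ai` has stored,
// preferring Anthropic; throws the "No AI keys found" error if neither is set.
const auto = createModelClient(config);

// Either way the caller sees the same ModelClient interface.
auto.complete([{ role: 'user', content: 'ping' }]).then(console.log);
```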
package/dist/cli/services/metaprompt/openai-client.js
@@ -0,0 +1,67 @@
+const OpenAI = require('openai');
+
+/**
+ * Client for interacting with OpenAI's chat API.
+ * Follows the ModelClient interface:
+ * - streamChat(messages, onToken): Promise<fullText>
+ * - complete(messages): Promise<fullText>
+ */
+class OpenAIClient {
+  /**
+   * @param {object} config
+   * @param {string} config.apiKey
+   * @param {string} [config.model]
+   * @param {number} [config.temperature]
+   */
+  constructor({ apiKey, model, temperature }) {
+    this.apiKey = apiKey;
+    this.model = model || 'gpt-4o';
+    this.temperature = temperature ?? 0.7;
+
+    this.client = new OpenAI({
+      apiKey: this.apiKey,
+    });
+  }
+
+  /**
+   * Stream chat completion.
+   * @param {Array<{role:string, content:string}>} messages
+   * @param {(token:string) => void} [onToken] Callback for each token
+   * @returns {Promise<string>} Full response text
+   */
+  async streamChat(messages, onToken) {
+    const stream = await this.client.chat.completions.create({
+      model: this.model,
+      messages,
+      temperature: this.temperature,
+      stream: true,
+    });
+
+    let fullText = '';
+    for await (const chunk of stream) {
+      const content = chunk.choices[0]?.delta?.content || '';
+      if (content) {
+        fullText += content;
+        if (onToken) onToken(content);
+      }
+    }
+    return fullText;
+  }
+
+  /**
+   * Non-streaming chat completion.
+   * @param {Array<{role:string, content:string}>} messages
+   * @returns {Promise<string>} Full response text
+   */
+  async complete(messages) {
+    const completion = await this.client.chat.completions.create({
+      model: this.model,
+      messages,
+      temperature: this.temperature,
+    });
+    return completion.choices[0]?.message?.content || '';
+  }
+}
+
+module.exports = OpenAIClient;
+
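The `anthropic-client.js` added alongside it (+87 lines, not shown in this excerpt) follows the same `streamChat`/`complete` interface, so callers can stream tokens without knowing the provider. A minimal sketch against the class above; the key sourcing is illustrative:

```js
const OpenAIClient = require('./openai-client');

const client = new OpenAIClient({
  apiKey: process.env.OPENAI_API_KEY,
  temperature: 0.2, // lower than the 0.7 default for more deterministic output
});

async function main() {
  // Tokens print as they arrive; the accumulated text is also returned.
  const full = await client.streamChat(
    [{ role: 'user', content: 'Name three uses of IPFS.' }],
    (token) => process.stdout.write(token)
  );
  console.log(`\n--- ${full.length} chars total ---`);
}

main();
```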
package/dist/cli/services/metaprompt/persistence.js
@@ -0,0 +1,86 @@
+const fs = require('fs');
+const path = require('path');
+
+class MetapromptPersistence {
+  constructor(config) {
+    this.config = config;
+    this.projectDir = config.getProjectDir();
+  }
+
+  getPaths(slug) {
+    const timestamp = new Date().toISOString().replace(/[:.]/g, '-');
+    return {
+      metaprompt: path.join(this.projectDir, '.sage', 'metaprompts', `${slug}.md`),
+      skill: path.join(this.projectDir, 'prompts', 'skills', `${slug}.md`),
+      history: path.join(this.projectDir, '.sage', 'metaprompts', 'history', `${slug}-${timestamp}.json`),
+    };
+  }
+
+  saveMetaprompt(slug, data) {
+    const paths = this.getPaths(slug);
+
+    // Ensure dirs
+    fs.mkdirSync(path.dirname(paths.metaprompt), { recursive: true });
+    fs.mkdirSync(path.dirname(paths.history), { recursive: true });
+
+    // 1. Save history JSON (The "Metaprompt Artifact")
+    fs.writeFileSync(paths.history, JSON.stringify(data, null, 2));
+
+    // 2. Save readable Markdown Metaprompt (Source of Truth)
+    const mdContent = `---
+slug: ${slug}
+template: ${data.templateKey}
+date: ${new Date().toISOString()}
+---
+
+# Metaprompt Session: ${slug}
+
+## Transcript
+${data.transcript.map(t => `**${t.role.toUpperCase()}**: ${t.content}`).join('\n\n')}
+
+## Filled Slots
+\`\`\`json
+${JSON.stringify(data.answers, null, 2)}
+\`\`\`
+`;
+    fs.writeFileSync(paths.metaprompt, mdContent);
+
+    return paths;
+  }
+
+  saveSkill(slug, systemPrompt) {
+    const paths = this.getPaths(slug);
+    fs.mkdirSync(path.dirname(paths.skill), { recursive: true });
+
+    const content = `---
+title: ${slug} (Generated Agent)
+description: Generated via sage interview
+---
+
+${systemPrompt}
+`;
+    fs.writeFileSync(paths.skill, content);
+    return paths.skill;
+  }
+
+  appendToAgentsList(slug) {
+    const agentsPath = path.join(this.projectDir, 'prompts', 'agents.md');
+    const dir = path.dirname(agentsPath);
+    if (!fs.existsSync(dir)) return;
+
+    const entry = `- [${slug}](./skills/${slug}.md)`;
+    let existing = '';
+    if (fs.existsSync(agentsPath)) {
+      try {
+        existing = fs.readFileSync(agentsPath, 'utf8');
+        if (existing.includes(entry)) return; // avoid duplicates
+      } catch (_) {
+        existing = '';
+      }
+    }
+    const prefix = existing && !existing.endsWith('\n') ? '\n' : '';
+    fs.appendFileSync(agentsPath, `${prefix}${entry}\n`);
+  }
+}
+
+module.exports = MetapromptPersistence;
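One `saveMetaprompt` call therefore fans out to two files (plus one more per `saveSkill`), all rooted at the project dir. A sketch of the resulting layout, with an illustrative slug and data:

```js
const MetapromptPersistence = require('./persistence');
const config = require('../../config');

const store = new MetapromptPersistence(config);
const paths = store.saveMetaprompt('code-reviewer', {
  templateKey: 'custom',
  transcript: [
    { role: 'assistant', content: 'What should the agent review?' },
    { role: 'user', content: 'TypeScript PRs in our monorepo' },
  ],
  answers: { role: 'Senior reviewer', goal: 'Catch regressions early' },
});
// paths.metaprompt -> <project>/.sage/metaprompts/code-reviewer.md
// paths.history    -> <project>/.sage/metaprompts/history/code-reviewer-<timestamp>.json

store.saveSkill('code-reviewer', '<role>\nSenior code reviewer\n</role>');
// -> <project>/prompts/skills/code-reviewer.md
store.appendToAgentsList('code-reviewer');
// appends "- [code-reviewer](./skills/code-reviewer.md)" to prompts/agents.md
```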
package/dist/cli/services/metaprompt/prompt-builder.js
@@ -0,0 +1,186 @@
+const templates = require('./templates.json');
+
+/**
+ * Mapping of slot groups to section names and order.
+ * This normalizes the various group names used in templates.
+ */
+const GROUP_MAPPINGS = {
+  // Identity-related
+  'identity': 'identity',
+  'role': 'identity',
+
+  // Goals-related
+  'goals': 'goals',
+  'goal': 'goals',
+
+  // Context-related
+  'context': 'context',
+  'stack': 'context',
+
+  // Constraints-related
+  'constraints': 'constraints',
+  'guidelines': 'constraints',
+
+  // Tools-related
+  'tools': 'tools',
+
+  // Output-related
+  'output': 'output',
+  'format': 'output',
+};
+
+/**
+ * Tool definitions for the inject_tools feature.
+ * Maps tool names to descriptions.
+ */
+const TOOL_DEFINITIONS = {
+  'sage prompts': '`sage prompts`: Workspace-first prompt and skill management (init, list, diff, publish).',
+  'sage project': '`sage project`: Manifest management, IPFS publishing, and governance proposal helpers.',
+  'sage governance': '`sage governance`: Governance commands for viewing proposals, voting, and managing SubDAOs.',
+  'sage gov': '`sage gov`: Alias for sage governance - proposal management and voting.',
+  'sage treasury': '`sage treasury`: Treasury inspection, withdrawals, boosts, and bond operations.',
+  'sage ipfs': '`sage ipfs`: IPFS operations including pinning, fetching, and gateway management.',
+  'sage dev': '`sage dev`: Development commands for building, testing, and deploying.',
+  'sage config': '`sage config`: Configuration management for RPC, wallets, and AI providers.',
+  'sage mcp': '`sage mcp`: MCP server management for agent integrations.',
+};
+
+class PromptBuilder {
+  constructor() {
+    // Could be extended to load custom tool definitions
+  }
+
+  /**
+   * Builds the final system prompt string from the filled slots.
+   * @param {string} templateKey
+   * @param {Array} slots - Definition of slots
+   * @param {Object} answers - Key-value pairs of filled answers
+   * @returns {string} The final XML-structured system prompt
+   */
+  buildSystemPrompt(templateKey, slots, answers) {
+    const template = templates[templateKey] || templates['custom'];
+
+    // Build sections in order
+    const sections = [];
+
+    // 1. Role/Identity section
+    const identityContent = this.#buildGroupSection(slots, answers, 'identity');
+    const roleContent = this.#getAnswer(answers, 'role') || identityContent || template.description;
+    if (roleContent) {
+      sections.push(this.#buildSection('role', roleContent));
+    }
+
+    // 2. Goals section
+    const goalsContent = this.#buildGroupSection(slots, answers, 'goals');
+    const goalContent = this.#getAnswer(answers, 'goal') || goalsContent || template.default_goal;
+    if (goalContent) {
+      sections.push(this.#buildSection('goals', goalContent));
+    }
+
+    // 3. Context section (tech stack, data sources, etc.)
+    const contextSection = this.#buildGroupSection(slots, answers, 'context');
+    if (contextSection) {
+      sections.push(this.#buildSection('context', contextSection));
+    }
+
+    // 4. Constraints section
+    const constraintsSection = this.#buildGroupSection(slots, answers, 'constraints');
+    if (constraintsSection) {
+      sections.push(this.#buildSection('constraints', constraintsSection));
+    }
+
+    // 5. Tools section
+    const toolsSection = this.#buildToolsSection(template.inject_tools, answers, slots);
+    if (toolsSection) {
+      sections.push(toolsSection);
+    }
+
+    // 6. Output format section
+    const outputSection = this.#buildGroupSection(slots, answers, 'output');
+    if (outputSection) {
+      sections.push(this.#buildSection('output_format', outputSection));
+    }
+
+    // 7. Final reminder
+    sections.push(`Remember: You are ${template.name}. Stay in character and follow your constraints.`);
+
+    return sections.filter(Boolean).join('\n\n');
+  }
+
+  #getAnswer(answers, key) {
+    return answers[key] || '';
+  }
+
+  #buildSection(title, content) {
+    if (!content) return '';
+    const tag = this.#sanitizeTag(title);
+    return `<${tag}>\n${content}\n</${tag}>`;
+  }
+
+  #sanitizeTag(title) {
+    // Remove special chars, replace spaces with underscores, lowercase
+    return title.toLowerCase().replace(/[^a-z0-9_]/g, '_').replace(/_+/g, '_');
+  }
+
+  /**
+   * Build a section from all slots matching a normalized group.
+   */
+  #buildGroupSection(slots, answers, normalizedGroup) {
+    // Find slots matching this group (handling group name variations)
+    const groupSlots = slots.filter(slot => {
+      const slotGroup = (slot.group || '').toLowerCase();
+      const mapped = GROUP_MAPPINGS[slotGroup] || slotGroup;
+      return mapped === normalizedGroup;
+    });
+
+    if (!groupSlots.length) return '';
+
+    const lines = groupSlots.map(slot => {
+      const ans = answers[slot.key];
+      if (!ans) return null;
+      return `- **${slot.label}**: ${ans}`;
+    }).filter(Boolean);
+
+    return lines.length ? lines.join('\n') : '';
+  }
+
+  #buildToolsSection(injectedTools = [], answers, slots) {
+    const definitions = [];
+
+    // Add injected tools from the template
+    for (const tool of injectedTools) {
+      const def = TOOL_DEFINITIONS[tool];
+      if (def) {
+        definitions.push(`- ${def}`);
+      } else {
+        // Unknown tool - add a generic entry
+        definitions.push(`- \`${tool}\`: Available tool (see documentation).`);
+      }
+    }
+
+    // Add tools from slots with the 'tools' group
+    const toolSlots = slots.filter(s => GROUP_MAPPINGS[s.group] === 'tools');
+    for (const slot of toolSlots) {
+      const ans = answers[slot.key];
+      if (ans && !definitions.some(d => d.includes(ans))) {
+        definitions.push(`- ${ans}`);
+      }
+    }
+
+    // Add a generic tool answer if provided
+    if (answers.tools && !definitions.some(d => d.includes(answers.tools))) {
+      definitions.push(`- ${answers.tools}`);
+    }
+
+    if (!definitions.length) return '';
+
+    return `<tools>
+You have access to the following tools and capabilities:
+${definitions.join('\n')}
+
+Use these tools when appropriate to fulfill the user's request.
+</tools>`;
+  }
+}
+
+module.exports = PromptBuilder;
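Given filled slots, the builder emits XML-tagged sections in a fixed order (role, goals, context, constraints, tools, output format, reminder), skipping any that are empty. A sketch of the input and output shape; the slot objects carry only the `key`/`label`/`group` fields the code reads, and the `'custom'` entry is assumed to exist in templates.json:

```js
const PromptBuilder = require('./prompt-builder');

const slots = [
  { key: 'audience', label: 'Audience', group: 'context' },
  { key: 'tone', label: 'Tone', group: 'constraints' },
];
const answers = {
  role: 'You are a release-notes writer for a CLI tool.',
  goal: 'Turn diffs into concise, user-facing changelogs.',
  audience: 'CLI users upgrading between minor versions',
  tone: 'Plain language, no marketing copy',
};

console.log(new PromptBuilder().buildSystemPrompt('custom', slots, answers));
// <role>
// You are a release-notes writer for a CLI tool.
// </role>
//
// <goals>
// Turn diffs into concise, user-facing changelogs.
// </goals>
//
// <context>
// - **Audience**: CLI users upgrading between minor versions
// </context>
//
// <constraints>
// - **Tone**: Plain language, no marketing copy
// </constraints>
//
// Remember: You are <template name>. Stay in character and follow your constraints.
```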
package/dist/cli/services/metaprompt/session.js
@@ -1,32 +1,33 @@
+const { createModelClient } = require('./model-client');
+
 class MetapromptSession {
   constructor(options = {}) {
     const {
       title,
       goal,
-      targetModel = 'gpt-5',
       interviewStyle = 'one-question-at-a-time',
-      apiKey,
       temperature = 0.7,
-
-
-
-
+      process: proc,
+      config, // injected config dependency
+      provider, // optional override
+      model, // optional override
+      apiKey, // optional override
     } = options;

-    this.
-    this.
-    this.env = env || this.process.env;
+    this.process = proc || global.process;
+    this.config = config || require('../../config'); // fallback to global require if not passed

-
-
+    // Initialize the ModelClient using the factory
+    try {
+      this.client = createModelClient(this.config, { provider, model, apiKey });
+    } catch (error) {
+      // If createModelClient fails (no keys), re-throw with the helpful message
+      throw error;
     }

     this.title = title || 'Untitled Metaprompt';
     this.goal = goal || 'Design a high quality system prompt.';
-    this.targetModel = targetModel;
-    this.openAiModel = openAiModel;
     this.interviewStyle = interviewStyle;
-    this.apiKey = apiKey;
     this.temperature = temperature;

     this.messages = [
@@ -48,7 +49,6 @@ class MetapromptSession {
       'Before emitting FINAL_PROMPT, summarise the agreed requirements and confirm readiness.',
       'If the user types `/finish` or explicitly asks you to finalise, produce the FINAL_PROMPT immediately.',
       `Primary goal: ${this.goal}.`,
-      `Target downstream model: ${this.targetModel}.`,
       `Interview cadence preference: ${this.interviewStyle}.`,
       'Guardrails: capture constraints, desired behaviours, failure modes, tone, tooling, fallback defaults, and evaluation hooks.',
       'When providing FINAL_PROMPT, include only the polished system prompt inside the fenced block (no commentary).',
@@ -66,55 +66,6 @@
     this.transcript.push({ role, content, timestamp: new Date().toISOString() });
   }

-  async requestAssistantMessage(onToken) {
-    const response = await this.axios.post(
-      'https://api.openai.com/v1/chat/completions',
-      {
-        model: this.openAiModel,
-        messages: this.messages,
-        temperature: this.temperature,
-        stream: true,
-      },
-      {
-        headers: {
-          Authorization: `Bearer ${this.apiKey}`,
-          'Content-Type': 'application/json',
-        },
-        responseType: 'stream',
-      }
-    );
-
-    return new Promise((resolve, reject) => {
-      let buffer = '';
-      let fullText = '';
-      response.data.on('data', (chunk) => {
-        buffer += chunk.toString('utf8');
-        const lines = buffer.split('\n');
-        buffer = lines.pop() || '';
-        for (const line of lines) {
-          const trimmed = line.trim();
-          if (!trimmed.startsWith('data:')) continue;
-          const payload = trimmed.slice(5).trim();
-          if (!payload || payload === '[DONE]') continue;
-          try {
-            const parsed = JSON.parse(payload);
-            const delta = parsed.choices?.[0]?.delta || {};
-            if (typeof delta.content === 'string' && delta.content.length) {
-              fullText += delta.content;
-              if (typeof onToken === 'function') onToken(delta.content);
-            }
-            if (delta.role === 'assistant' && typeof onToken === 'function') {
-              onToken('');
-            }
-          } catch (_) {}
-        }
-      });
-
-      response.data.on('end', () => resolve(fullText.trim()));
-      response.data.on('error', (error) => reject(error));
-    });
-  }
-
   extractFinalPrompt(text) {
     if (!text) return null;
     const match = /FINAL_PROMPT\s*```(?:[a-zA-Z0-9_-]+\n)?([\s\S]*?)```/i.exec(text);
@@ -131,24 +82,13 @@
     this.#appendTranscript('user', this.messages[1].content);

     while (!this.finalPrompt) {
-      let assistantMessage;
+      let assistantMessage = '';
       try {
-        assistantMessage = await this.
+        assistantMessage = await this.client.streamChat(this.messages, (token) => {
           if (token) printAssistant(token);
         });
       } catch (error) {
-
-        let detail = error?.response?.data?.error?.message;
-        if (!detail && error?.response?.data) {
-          try {
-            detail = JSON.stringify(error.response.data);
-          } catch (_) {}
-        }
-        if (!detail) detail = error.message;
-        const hint = detail && detail.includes('model')
-          ? ' (hint: set SAGE_METAPROMPT_MODEL or choose a supported OpenAI model)'
-          : '';
-        throw new Error(status ? `OpenAI error (${status}): ${detail}${hint}` : `OpenAI error: ${detail}${hint}`);
+        throw new Error(`AI Model error: ${error.message}`);
       }

       if (assistantMessage) {
@@ -192,8 +132,6 @@
       finalAssistantMessage: this.finalAssistantMessage,
       transcript: this.transcript,
       messages: this.messages,
-      targetModel: this.targetModel,
-      openAiModel: this.openAiModel,
     };
   }
 }