@sage-protocol/cli 0.3.10 → 0.4.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. package/dist/cli/browser-wallet-integration.js +0 -1
  2. package/dist/cli/cast-wallet-manager.js +0 -1
  3. package/dist/cli/commands/interview.js +149 -0
  4. package/dist/cli/commands/personal.js +234 -89
  5. package/dist/cli/commands/stake-status.js +0 -2
  6. package/dist/cli/config.js +28 -8
  7. package/dist/cli/governance-manager.js +28 -19
  8. package/dist/cli/index.js +32 -8
  9. package/dist/cli/library-manager.js +16 -6
  10. package/dist/cli/mcp-server-stdio.js +549 -0
  11. package/dist/cli/mcp-server.js +4 -30
  12. package/dist/cli/mcp-setup.md +35 -34
  13. package/dist/cli/metamask-integration.js +0 -1
  14. package/dist/cli/privy-wallet-manager.js +2 -2
  15. package/dist/cli/prompt-manager.js +0 -1
  16. package/dist/cli/services/doctor/fixers.js +1 -1
  17. package/dist/cli/services/mcp/env-loader.js +2 -0
  18. package/dist/cli/services/mcp/quick-start.js +14 -15
  19. package/dist/cli/services/mcp/sage-tool-registry.js +330 -0
  20. package/dist/cli/services/mcp/tool-args-validator.js +31 -0
  21. package/dist/cli/services/metaprompt/anthropic-client.js +87 -0
  22. package/dist/cli/services/metaprompt/interview-driver.js +161 -0
  23. package/dist/cli/services/metaprompt/model-client.js +49 -0
  24. package/dist/cli/services/metaprompt/openai-client.js +67 -0
  25. package/dist/cli/services/metaprompt/persistence.js +86 -0
  26. package/dist/cli/services/metaprompt/prompt-builder.js +186 -0
  27. package/dist/cli/services/metaprompt/session.js +18 -80
  28. package/dist/cli/services/metaprompt/slot-planner.js +115 -0
  29. package/dist/cli/services/metaprompt/templates.json +130 -0
  30. package/dist/cli/subdao.js +0 -3
  31. package/dist/cli/sxxx-manager.js +0 -1
  32. package/dist/cli/utils/tx-wait.js +0 -3
  33. package/dist/cli/wallet-manager.js +18 -19
  34. package/dist/cli/walletconnect-integration.js +0 -1
  35. package/dist/cli/wizard-manager.js +0 -1
  36. package/package.json +3 -1
@@ -0,0 +1,87 @@
1
let Anthropic;
try {
  // Lazy / guarded require so OpenAI-only setups don't crash on import
  // eslint-disable-next-line global-require
  Anthropic = require('@anthropic-ai/sdk');
} catch (err) {
  // Defer error until someone actually tries to construct the client
  Anthropic = null;
}

/**
 * Client for interacting with Anthropic's Claude API.
 * Follows the ModelClient interface:
 * - streamChat(messages, onToken): Promise<fullText>
 * - complete(messages): Promise<fullText>
 */
class AnthropicClient {
  /**
   * @param {object} config
   * @param {string} config.apiKey Anthropic API key
   * @param {string} [config.model] Model id (defaults to claude-sonnet-4-20250514)
   * @param {number} [config.temperature] Sampling temperature (defaults to 0.7)
   * @throws {Error} If the @anthropic-ai/sdk package is not installed
   */
  constructor({ apiKey, model, temperature }) {
    if (!Anthropic) {
      throw new Error('Anthropic SDK not installed. Please add @anthropic-ai/sdk or configure a different provider.');
    }
    this.apiKey = apiKey;
    this.model = model || 'claude-sonnet-4-20250514';
    this.temperature = temperature ?? 0.7;

    this.client = new Anthropic({
      apiKey: this.apiKey,
    });
  }

  /**
   * Split a ModelClient-style message list into Anthropic's shape: the API
   * takes the system prompt as a top-level `system` property rather than a
   * message, and 'developer' is treated as a system-role alias.
   * NOTE: only the FIRST system/developer message is used; any later ones
   * are dropped (same behavior as before this refactor).
   * @param {Array<{role:string, content:string}>} messages
   * @returns {{system: (string|undefined), chat: Array<{role:string, content:string}>}}
   */
  #splitSystem(messages) {
    const systemMessage = messages.find(m => m.role === 'system' || m.role === 'developer');
    const chat = messages.filter(m => m.role !== 'system' && m.role !== 'developer');
    return { system: systemMessage ? systemMessage.content : undefined, chat };
  }

  /**
   * Stream chat completion.
   * @param {Array<{role:string, content:string}>} messages
   * @param {(token:string) => void} [onToken] Callback for each token
   * @returns {Promise<string>} Full response text
   */
  async streamChat(messages, onToken) {
    const { system, chat } = this.#splitSystem(messages);

    const stream = await this.client.messages.create({
      model: this.model,
      system,
      messages: chat,
      max_tokens: 4096,
      temperature: this.temperature,
      stream: true,
    });

    let fullText = '';
    for await (const chunk of stream) {
      // Only text deltas contribute to the response; other stream events
      // (message_start, content_block_start, ...) are intentionally ignored.
      if (chunk.type === 'content_block_delta' && chunk.delta.type === 'text_delta') {
        const content = chunk.delta.text;
        fullText += content;
        if (onToken) onToken(content);
      }
    }
    return fullText;
  }

  /**
   * Non-streaming chat completion.
   * @param {Array<{role:string, content:string}>} messages
   * @returns {Promise<string>} Full response text
   */
  async complete(messages) {
    const { system, chat } = this.#splitSystem(messages);

    const response = await this.client.messages.create({
      model: this.model,
      system,
      messages: chat,
      max_tokens: 4096,
      temperature: this.temperature,
    });

    // Fix: the response may contain several content blocks and the first one
    // is not guaranteed to be text. `response.content[0].text` threw on an
    // empty content array and silently missed text in later blocks; join all
    // text blocks instead.
    return (response.content || [])
      .filter(block => block.type === 'text')
      .map(block => block.text)
      .join('');
  }
}

module.exports = AnthropicClient;
@@ -0,0 +1,161 @@
1
const { createModelClient } = require('./model-client');
const SlotPlanner = require('./slot-planner');
const PromptBuilder = require('./prompt-builder');

/**
 * Drives a slot-filling interview: plans the slots to fill, generates
 * questions with an LLM, extracts answers back into the slot state, and
 * finally builds the resulting system prompt.
 */
class InterviewDriver {
  /**
   * @param {object} config CLI config object
   * @param {object} [options]
   * @param {string} [options.templateKey]
   * @param {string} [options.initialDescription]
   * @param {string} [options.provider]
   * @param {string} [options.model]
   * @param {string} [options.apiKey]
   */
  constructor(config, options = {}) {
    this.config = config;
    this.client = createModelClient(config, {
      provider: options.provider,
      model: options.model,
      apiKey: options.apiKey,
    });
    this.planner = new SlotPlanner(config, {
      client: this.client,
      provider: options.provider,
      model: options.model,
      apiKey: options.apiKey,
    });
    this.builder = new PromptBuilder();

    this.templateKey = options.templateKey || 'custom';
    this.initialDescription = options.initialDescription || '';
    this.transcript = [];
    this.slots = []; // The plan
    this.answers = {}; // The state of filled slots
    this.isDone = false;
    // Fix: keys of optional slots we have already asked about once. Without
    // this, getNextQuestion() would re-ask the same optional slot forever
    // whenever the user's answer never filled it.
    this.askedOptionalKeys = new Set();
  }

  /**
   * Initialize the driver. Call this before using getNextQuestion().
   * @param {object} [precomputedState] Optional pre-existing state to restore
   * @param {Array} [precomputedState.slots] Pre-computed slots (skips LLM planning)
   * @param {object} [precomputedState.answers] Pre-filled answers
   * @param {Array} [precomputedState.transcript] Prior transcript
   */
  async init(precomputedState = null) {
    // If we have precomputed state, restore it (skip expensive LLM planning)
    if (precomputedState && precomputedState.slots && precomputedState.slots.length > 0) {
      this.slots = precomputedState.slots;
      this.answers = precomputedState.answers || {};
      this.transcript = precomputedState.transcript || [];
      return;
    }

    // 1. Generate Plan (uses LLM for custom templates, static for built-ins)
    this.slots = await this.planner.planSlots(this.templateKey, this.initialDescription);

    // 2. Check if initialDescription fills any slots immediately (One-shot extraction)
    if (this.initialDescription) {
      await this.#extractFromText(this.initialDescription);
    }
  }

  /**
   * Produce the next question to ask, or null when the interview is done.
   * Required-but-empty slots are asked first (repeatedly, until filled);
   * then each high-priority optional slot is asked at most once.
   * @returns {Promise<string|null>}
   */
  async getNextQuestion() {
    if (this.isDone) return null;

    // 1. Find highest priority slot that is required and empty
    const nextSlot = this.slots.find(s => s.required && !this.answers[s.key]);
    if (nextSlot) {
      return await this.#generateQuestion(nextSlot);
    }

    // All required slots filled. Offer high-priority optional slots, but
    // only once each (fix: previously an unanswered optional slot was
    // re-asked indefinitely and isDone was never reached).
    const optionalSlot = this.slots.find(
      s => !this.answers[s.key] && s.priority <= 2 && !this.askedOptionalKeys.has(s.key)
    );
    if (optionalSlot) {
      this.askedOptionalKeys.add(optionalSlot.key);
      return await this.#generateQuestion(optionalSlot);
    }

    // If we are here, we are effectively done with the structured part.
    this.isDone = true;
    return null;
  }

  /**
   * Record the user's answer and best-effort extract slot values from it.
   * @param {string} answer
   */
  async processAnswer(answer) {
    this.transcript.push({ role: 'user', content: answer });
    await this.#extractFromText(answer); // Extract whatever we can from the answer
  }

  /** Build the final system prompt from the current slot state. */
  generateSystemPrompt() {
    return this.builder.buildSystemPrompt(this.templateKey, this.slots, this.answers);
  }

  // --- Private Helpers ---

  /**
   * Ask the LLM to phrase a question targeting one slot.
   * The generated question is appended to the transcript.
   */
  async #generateQuestion(slot) {
    const systemPrompt = `
You are an expert interviewer. Your goal is to extract specific information from the user to build an AI persona.
Current Target: "${slot.label}" (${slot.description})
Context So Far: ${JSON.stringify(this.answers)}

Generate a single, clear, friendly question to ask the user for this information.
Do not preface it with "Question:" or similar. Just ask.
`;
    const messages = [
      { role: 'system', content: systemPrompt },
      { role: 'user', content: 'Ask the question.' }
    ];

    const question = await this.client.complete(messages);
    this.transcript.push({ role: 'assistant', content: question });
    return question;
  }

  /**
   * Use the LLM to map free text onto the still-empty slots.
   * Extraction is best-effort: any parse/API failure is swallowed (logged
   * only under SAGE_VERBOSE) so the interview keeps going.
   * NOTE(review): only empty slots are sent in the schema, so despite the
   * prompt's parenthetical, already-filled slots are never updated here.
   */
  async #extractFromText(text) {
    // Use LLM to map text to slots
    // We send the schema + current answers + text
    // Use JSON.stringify for the text to handle quotes, braces, and special chars safely
    const emptySlots = this.slots.filter(s => !this.answers[s.key]);
    if (emptySlots.length === 0) return;

    const systemPrompt = `
You are a data extraction engine.
Slots Schema: ${JSON.stringify(emptySlots)}
(Only extract for empty slots, but update existing if new info is better).

Input Text: ${JSON.stringify(text)}

Return a JSON object where keys match the slot keys and values are the extracted information.
If no information is found for a slot, omit the key.
CRITICAL: Return ONLY a valid JSON object, no markdown, no explanation.
`;

    const messages = [
      { role: 'system', content: systemPrompt },
      { role: 'user', content: 'Extract data from the input text.' }
    ];

    try {
      const response = await this.client.complete(messages);
      // Try to find a JSON object in the response
      const jsonMatch = response.match(/\{[\s\S]*\}/);
      if (!jsonMatch) return;

      const updates = JSON.parse(jsonMatch[0]);

      // Update state. Fix: previously numbers and booleans returned by the
      // model were silently dropped (only strings/objects passed the type
      // check); coerce scalars to strings and stringify objects instead.
      for (const [key, val] of Object.entries(updates)) {
        if (val === null || val === undefined || val === '') continue;
        this.answers[key] = typeof val === 'object' ? JSON.stringify(val) : String(val);
      }
    } catch (e) {
      // Silent fail - extraction is best-effort
      if (process.env.SAGE_VERBOSE) {
        console.warn('Extraction failed:', e.message);
      }
    }
  }
}

module.exports = InterviewDriver;
@@ -0,0 +1,49 @@
1
const OpenAIClient = require('./openai-client');
const AnthropicClient = require('./anthropic-client');

/**
 * Factory to create the appropriate ModelClient based on available keys.
 *
 * Resolution order:
 *   1. Explicit override (provider + apiKey supplied directly).
 *   2. Explicitly requested provider with the key read from config — this
 *      now errors out if that key is missing rather than silently using a
 *      different provider than the user asked for.
 *   3. Default preference: Anthropic > OpenAI, whichever key exists.
 *
 * @param {object} config The config object (from require('../../config'))
 * @param {object} [overrides] Optional overrides for testing or CLI flags
 * @param {string} [overrides.provider] 'openai' | 'anthropic'
 * @param {string} [overrides.model]
 * @param {string} [overrides.apiKey]
 * @returns {OpenAIClient|AnthropicClient}
 * @throws {Error} If the provider is unknown or no usable API key is found
 */
function createModelClient(config, overrides = {}) {
  // 1. Explicit override
  if (overrides.provider && overrides.apiKey) {
    if (overrides.provider === 'anthropic') {
      return new AnthropicClient({ apiKey: overrides.apiKey, model: overrides.model });
    }
    if (overrides.provider === 'openai') {
      return new OpenAIClient({ apiKey: overrides.apiKey, model: overrides.model });
    }
    // Fix: an unknown provider previously fell through to config-based
    // resolution, silently ignoring both the provider and the supplied key.
    throw new Error(`Unknown AI provider '${overrides.provider}'. Supported providers: openai, anthropic.`);
  }

  // 2. Config-based resolution
  const aiConfig = config.readAIConfig();

  // Honor an explicitly requested provider, or fail loudly.
  // Fix: previously a requested provider with no matching key fell back to
  // whichever other key existed, quietly using a different provider.
  if (overrides.provider === 'anthropic') {
    if (aiConfig.anthropicApiKey) {
      return new AnthropicClient({ apiKey: aiConfig.anthropicApiKey, model: overrides.model });
    }
    throw new Error('Anthropic provider requested but no Anthropic API key is configured. Run `sage config ai set --provider anthropic --key <sk-ant-...>` or set ANTHROPIC_API_KEY.');
  }
  if (overrides.provider === 'openai') {
    if (aiConfig.openaiApiKey) {
      return new OpenAIClient({ apiKey: aiConfig.openaiApiKey, model: overrides.model });
    }
    throw new Error('OpenAI provider requested but no OpenAI API key is configured. Run `sage config ai set --provider openai --key <sk-...>` or set OPENAI_API_KEY.');
  }

  // 3. Default preference: Anthropic > OpenAI
  if (aiConfig.anthropicApiKey) {
    return new AnthropicClient({ apiKey: aiConfig.anthropicApiKey, model: overrides.model });
  }
  if (aiConfig.openaiApiKey) {
    return new OpenAIClient({ apiKey: aiConfig.openaiApiKey, model: overrides.model });
  }

  throw new Error(
    'No AI keys found. Please run `sage config ai set --provider anthropic --key <sk-ant-...>` or set ANTHROPIC_API_KEY.'
  );
}

module.exports = { createModelClient };
@@ -0,0 +1,67 @@
1
const OpenAI = require('openai');

/**
 * Client for interacting with OpenAI's chat API.
 * Implements the ModelClient interface:
 * - streamChat(messages, onToken): Promise<fullText>
 * - complete(messages): Promise<fullText>
 */
class OpenAIClient {
  /**
   * @param {object} config
   * @param {string} config.apiKey
   * @param {string} [config.model] Defaults to 'gpt-4o'
   * @param {number} [config.temperature] Defaults to 0.7
   */
  constructor({ apiKey, model, temperature }) {
    this.apiKey = apiKey;
    this.model = model || 'gpt-4o';
    this.temperature = temperature ?? 0.7;
    this.client = new OpenAI({ apiKey: this.apiKey });
  }

  /**
   * Stream chat completion.
   * @param {Array<{role:string, content:string}>} messages
   * @param {(token:string) => void} [onToken] Invoked once per streamed token
   * @returns {Promise<string>} Full response text
   */
  async streamChat(messages, onToken) {
    const request = {
      model: this.model,
      messages,
      temperature: this.temperature,
      stream: true,
    };
    const stream = await this.client.chat.completions.create(request);

    // Collect streamed fragments and join once at the end.
    const pieces = [];
    for await (const chunk of stream) {
      const token = chunk.choices[0]?.delta?.content || '';
      if (!token) continue;
      pieces.push(token);
      if (onToken) onToken(token);
    }
    return pieces.join('');
  }

  /**
   * Non-streaming chat completion.
   * @param {Array<{role:string, content:string}>} messages
   * @returns {Promise<string>} Full response text
   */
  async complete(messages) {
    const completion = await this.client.chat.completions.create({
      model: this.model,
      messages,
      temperature: this.temperature,
    });
    const choice = completion.choices[0];
    return choice?.message?.content || '';
  }
}

module.exports = OpenAIClient;
@@ -0,0 +1,86 @@
1
const fs = require('fs');
const path = require('path');

/**
 * Persists metaprompt interview artifacts into the project workspace:
 * a history JSON snapshot, a human-readable markdown transcript, the
 * generated skill file, and an entry in the agents list.
 */
class MetapromptPersistence {
  constructor(config) {
    this.config = config;
    this.projectDir = config.getProjectDir();
  }

  /**
   * Compute output paths for a slug. Note: the history path embeds a fresh
   * timestamp on every call.
   * @param {string} slug
   * @returns {{metaprompt:string, skill:string, history:string}}
   */
  getPaths(slug) {
    const timestamp = new Date().toISOString().replace(/[:.]/g, '-');
    const metapromptsDir = path.join(this.projectDir, '.sage', 'metaprompts');
    return {
      metaprompt: path.join(metapromptsDir, `${slug}.md`),
      skill: path.join(this.projectDir, 'prompts', 'skills', `${slug}.md`),
      history: path.join(metapromptsDir, 'history', `${slug}-${timestamp}.json`),
    };
  }

  /**
   * Write both the history JSON artifact and the readable markdown metaprompt.
   * @param {string} slug
   * @param {{templateKey:string, transcript:Array, answers:object}} data
   * @returns {{metaprompt:string, skill:string, history:string}} Paths written
   */
  saveMetaprompt(slug, data) {
    const paths = this.getPaths(slug);

    // Ensure both target directories exist
    for (const target of [paths.metaprompt, paths.history]) {
      fs.mkdirSync(path.dirname(target), { recursive: true });
    }

    // 1. History JSON (the "Metaprompt Artifact")
    fs.writeFileSync(paths.history, JSON.stringify(data, null, 2));

    // 2. Readable markdown metaprompt (source of truth)
    const mdContent = `---
slug: ${slug}
template: ${data.templateKey}
date: ${new Date().toISOString()}
---

# Metaprompt Session: ${slug}

## Transcript
${data.transcript.map(t => `**${t.role.toUpperCase()}**: ${t.content}`).join('\n\n')}

## Filled Slots
\`\`\`json
${JSON.stringify(data.answers, null, 2)}
\`\`\`
`;
    fs.writeFileSync(paths.metaprompt, mdContent);

    return paths;
  }

  /**
   * Write the generated system prompt as a skill markdown file.
   * @param {string} slug
   * @param {string} systemPrompt
   * @returns {string} The skill file path
   */
  saveSkill(slug, systemPrompt) {
    const { skill } = this.getPaths(slug);
    fs.mkdirSync(path.dirname(skill), { recursive: true });

    const content = `---
title: ${slug} (Generated Agent)
description: Generated via sage interview
---

${systemPrompt}
`;
    fs.writeFileSync(skill, content);
    return skill;
  }

  /**
   * Append a link for the slug to prompts/agents.md (no-op when the prompts
   * directory does not exist or the entry is already present).
   * @param {string} slug
   */
  appendToAgentsList(slug) {
    const agentsPath = path.join(this.projectDir, 'prompts', 'agents.md');
    if (!fs.existsSync(path.dirname(agentsPath))) return;

    const entry = `- [${slug}](./skills/${slug}.md)`;
    let existing = '';
    if (fs.existsSync(agentsPath)) {
      try {
        existing = fs.readFileSync(agentsPath, 'utf8');
      } catch (_) {
        existing = '';
      }
      if (existing.includes(entry)) return; // avoid duplicates
    }
    const needsNewline = existing && !existing.endsWith('\n');
    fs.appendFileSync(agentsPath, `${needsNewline ? '\n' : ''}${entry}\n`);
  }
}

module.exports = MetapromptPersistence;
@@ -0,0 +1,186 @@
1
const templates = require('./templates.json');

/**
 * Mapping of slot groups to section names and order.
 * This normalizes the various group names used in templates.
 */
const GROUP_MAPPINGS = Object.freeze({
  // Identity-related
  'identity': 'identity',
  'role': 'identity',

  // Goals-related
  'goals': 'goals',
  'goal': 'goals',

  // Context-related
  'context': 'context',
  'stack': 'context',

  // Constraints-related
  'constraints': 'constraints',
  'guidelines': 'constraints',

  // Tools-related
  'tools': 'tools',

  // Output-related
  'output': 'output',
  'format': 'output',
});

/**
 * Tool definitions for inject_tools feature.
 * Maps tool names to descriptions.
 */
const TOOL_DEFINITIONS = Object.freeze({
  'sage prompts': '`sage prompts`: Workspace-first prompt and skill management (init, list, diff, publish).',
  'sage project': '`sage project`: Manifest management, IPFS publishing, and governance proposal helpers.',
  'sage governance': '`sage governance`: Governance commands for viewing proposals, voting, and managing SubDAOs.',
  'sage gov': '`sage gov`: Alias for sage governance - proposal management and voting.',
  'sage treasury': '`sage treasury`: Treasury inspection, withdrawals, boosts, and bond operations.',
  'sage ipfs': '`sage ipfs`: IPFS operations including pinning, fetching, and gateway management.',
  'sage dev': '`sage dev`: Development commands for building, testing, and deploying.',
  'sage config': '`sage config`: Configuration management for RPC, wallets, and AI providers.',
  'sage mcp': '`sage mcp`: MCP server management for agent integrations.',
});

/**
 * Assembles the final XML-structured system prompt from a template, slot
 * definitions, and the answers collected during the interview.
 */
class PromptBuilder {
  constructor() {
    // Could be extended to load custom tool definitions
  }

  /**
   * Builds the final system prompt string from the filled slots.
   * @param {string} templateKey
   * @param {Array} slots - Definition of slots
   * @param {Object} answers - Key-value pairs of filled answers
   * @returns {string} The final XML-structured system prompt
   */
  buildSystemPrompt(templateKey, slots, answers) {
    const template = templates[templateKey] || templates['custom'];

    // Build sections in order
    const sections = [];

    // 1. Role/Identity section
    const identityContent = this.#buildGroupSection(slots, answers, 'identity');
    const roleContent = this.#getAnswer(answers, 'role') || identityContent || template.description;
    if (roleContent) {
      sections.push(this.#buildSection('role', roleContent));
    }

    // 2. Goals section
    const goalsContent = this.#buildGroupSection(slots, answers, 'goals');
    const goalContent = this.#getAnswer(answers, 'goal') || goalsContent || template.default_goal;
    if (goalContent) {
      sections.push(this.#buildSection('goals', goalContent));
    }

    // 3. Context section (tech stack, data sources, etc.)
    const contextSection = this.#buildGroupSection(slots, answers, 'context');
    if (contextSection) {
      sections.push(this.#buildSection('context', contextSection));
    }

    // 4. Constraints section
    const constraintsSection = this.#buildGroupSection(slots, answers, 'constraints');
    if (constraintsSection) {
      sections.push(this.#buildSection('constraints', constraintsSection));
    }

    // 5. Tools section
    const toolsSection = this.#buildToolsSection(template.inject_tools, answers, slots);
    if (toolsSection) {
      sections.push(toolsSection);
    }

    // 6. Output format section
    const outputSection = this.#buildGroupSection(slots, answers, 'output');
    if (outputSection) {
      sections.push(this.#buildSection('output_format', outputSection));
    }

    // 7. Final reminder. Fix: only emit when the template has a name —
    // previously a missing name rendered as "You are undefined".
    if (template.name) {
      sections.push(`Remember: You are ${template.name}. Stay in character and follow your constraints.`);
    }

    return sections.filter(Boolean).join('\n\n');
  }

  #getAnswer(answers, key) {
    return answers[key] || '';
  }

  /**
   * Normalize a slot group name to its canonical section key
   * (case-insensitive; unknown groups pass through lowercased).
   */
  #normalizeGroup(group) {
    const key = (group || '').toLowerCase();
    return GROUP_MAPPINGS[key] || key;
  }

  /** Wrap content in an XML-ish tag derived from the section title. */
  #buildSection(title, content) {
    if (!content) return '';
    const tag = this.#sanitizeTag(title);
    return `<${tag}>\n${content}\n</${tag}>`;
  }

  #sanitizeTag(title) {
    // Remove special chars, replace spaces with underscores, lowercase
    return title.toLowerCase().replace(/[^a-z0-9_]/g, '_').replace(/_+/g, '_');
  }

  /**
   * Build a section from all slots matching a normalized group.
   * Emits one markdown bullet per answered slot in the group.
   */
  #buildGroupSection(slots, answers, normalizedGroup) {
    // Find slots matching this group (handling group name variations)
    const groupSlots = slots.filter(slot => this.#normalizeGroup(slot.group) === normalizedGroup);

    if (!groupSlots.length) return '';

    const lines = groupSlots.map(slot => {
      const ans = answers[slot.key];
      if (!ans) return null;
      return `- **${slot.label}**: ${ans}`;
    }).filter(Boolean);

    return lines.length ? lines.join('\n') : '';
  }

  /**
   * Build the <tools> section from template-injected tools, tool-group
   * slots, and a generic `tools` answer, deduplicating by substring match.
   */
  #buildToolsSection(injectedTools = [], answers, slots) {
    const definitions = [];

    // Add injected tools from template
    for (const tool of injectedTools) {
      const def = TOOL_DEFINITIONS[tool];
      if (def) {
        definitions.push(`- ${def}`);
      } else {
        // Unknown tool - add a generic entry
        definitions.push(`- \`${tool}\`: Available tool (see documentation).`);
      }
    }

    // Add tools from slots with 'tools' group. Fix: match case-insensitively
    // via #normalizeGroup, consistent with #buildGroupSection (previously a
    // direct GROUP_MAPPINGS[s.group] lookup missed e.g. group 'Tools').
    const toolSlots = slots.filter(s => this.#normalizeGroup(s.group) === 'tools');
    for (const slot of toolSlots) {
      const ans = answers[slot.key];
      if (ans && !definitions.some(d => d.includes(ans))) {
        definitions.push(`- ${ans}`);
      }
    }

    // Add generic tool answer if provided
    if (answers.tools && !definitions.some(d => d.includes(answers.tools))) {
      definitions.push(`- ${answers.tools}`);
    }

    if (!definitions.length) return '';

    return `<tools>
You have access to the following tools and capabilities:
${definitions.join('\n')}

Use these tools when appropriate to fulfill the user's request.
</tools>`;
  }
}

module.exports = PromptBuilder;