@sage-protocol/cli 0.4.0 → 0.4.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42) hide show
  1. package/dist/cli/browser-wallet-integration.js +0 -1
  2. package/dist/cli/cast-wallet-manager.js +0 -1
  3. package/dist/cli/commands/interview.js +149 -0
  4. package/dist/cli/commands/personal.js +138 -79
  5. package/dist/cli/commands/prompts.js +242 -87
  6. package/dist/cli/commands/stake-status.js +0 -2
  7. package/dist/cli/config.js +28 -8
  8. package/dist/cli/governance-manager.js +28 -19
  9. package/dist/cli/index.js +32 -8
  10. package/dist/cli/library-manager.js +16 -6
  11. package/dist/cli/mcp-server-stdio.js +759 -156
  12. package/dist/cli/mcp-server.js +4 -30
  13. package/dist/cli/metamask-integration.js +0 -1
  14. package/dist/cli/privy-wallet-manager.js +2 -2
  15. package/dist/cli/prompt-manager.js +0 -1
  16. package/dist/cli/services/artifact-manager.js +198 -0
  17. package/dist/cli/services/doctor/fixers.js +1 -1
  18. package/dist/cli/services/mcp/env-loader.js +2 -0
  19. package/dist/cli/services/mcp/prompt-result-formatter.js +8 -1
  20. package/dist/cli/services/mcp/quick-start.js +14 -15
  21. package/dist/cli/services/mcp/sage-tool-registry.js +322 -0
  22. package/dist/cli/services/mcp/tool-args-validator.js +43 -0
  23. package/dist/cli/services/metaprompt/anthropic-client.js +87 -0
  24. package/dist/cli/services/metaprompt/interview-driver.js +161 -0
  25. package/dist/cli/services/metaprompt/model-client.js +49 -0
  26. package/dist/cli/services/metaprompt/openai-client.js +67 -0
  27. package/dist/cli/services/metaprompt/persistence.js +86 -0
  28. package/dist/cli/services/metaprompt/prompt-builder.js +186 -0
  29. package/dist/cli/services/metaprompt/session.js +18 -80
  30. package/dist/cli/services/metaprompt/slot-planner.js +115 -0
  31. package/dist/cli/services/metaprompt/templates.json +130 -0
  32. package/dist/cli/services/project-context.js +98 -0
  33. package/dist/cli/subdao.js +0 -3
  34. package/dist/cli/sxxx-manager.js +0 -1
  35. package/dist/cli/utils/aliases.js +0 -6
  36. package/dist/cli/utils/tx-wait.js +0 -3
  37. package/dist/cli/wallet-manager.js +18 -19
  38. package/dist/cli/walletconnect-integration.js +0 -1
  39. package/dist/cli/wizard-manager.js +0 -1
  40. package/package.json +3 -1
  41. package/dist/cli/commands/prompt-test.js +0 -176
  42. package/dist/cli/commands/prompt.js +0 -2531
@@ -0,0 +1,67 @@
1
const OpenAI = require('openai');

/**
 * Chat client backed by OpenAI's chat-completions API.
 *
 * Implements the ModelClient interface:
 * - streamChat(messages, onToken): Promise<fullText>
 * - complete(messages): Promise<fullText>
 */
class OpenAIClient {
  /**
   * @param {object} config
   * @param {string} config.apiKey - OpenAI API key.
   * @param {string} [config.model] - Model id; defaults to 'gpt-4o'.
   * @param {number} [config.temperature] - Sampling temperature; defaults to 0.7
   *   (nullish coalescing so an explicit 0 is respected).
   */
  constructor({ apiKey, model, temperature }) {
    this.apiKey = apiKey;
    this.model = model || 'gpt-4o';
    this.temperature = temperature ?? 0.7;
    this.client = new OpenAI({ apiKey: this.apiKey });
  }

  /**
   * Stream a chat completion, invoking onToken for each content delta.
   * @param {Array<{role:string, content:string}>} messages
   * @param {(token:string) => void} [onToken] - Optional per-token callback.
   * @returns {Promise<string>} Concatenated response text.
   */
  async streamChat(messages, onToken) {
    const stream = await this.client.chat.completions.create({
      model: this.model,
      messages,
      temperature: this.temperature,
      stream: true,
    });

    const parts = [];
    for await (const chunk of stream) {
      const token = chunk.choices[0]?.delta?.content || '';
      if (!token) continue;
      parts.push(token);
      onToken?.(token);
    }
    return parts.join('');
  }

  /**
   * Request a complete (non-streaming) chat completion.
   * @param {Array<{role:string, content:string}>} messages
   * @returns {Promise<string>} Response text, or '' when the API returns none.
   */
  async complete(messages) {
    const completion = await this.client.chat.completions.create({
      model: this.model,
      messages,
      temperature: this.temperature,
    });
    return completion.choices[0]?.message?.content || '';
  }
}

module.exports = OpenAIClient;
@@ -0,0 +1,86 @@
1
const fs = require('fs');
const path = require('path');

/**
 * Persists metaprompt interview artifacts under the project directory:
 * a raw JSON history record, a human-readable markdown metaprompt, and
 * the generated skill file.
 */
class MetapromptPersistence {
  /**
   * @param {object} config - CLI config object exposing getProjectDir().
   */
  constructor(config) {
    this.config = config;
    this.projectDir = config.getProjectDir();
  }

  /**
   * Resolve the output paths for a slug. Note: the history path embeds a
   * fresh timestamp, so each call yields a new history filename.
   * @param {string} slug
   * @returns {{metaprompt:string, skill:string, history:string}}
   */
  getPaths(slug) {
    const stamp = new Date().toISOString().replace(/[:.]/g, '-');
    const metapromptDir = path.join(this.projectDir, '.sage', 'metaprompts');
    return {
      metaprompt: path.join(metapromptDir, `${slug}.md`),
      skill: path.join(this.projectDir, 'prompts', 'skills', `${slug}.md`),
      history: path.join(metapromptDir, 'history', `${slug}-${stamp}.json`),
    };
  }

  /**
   * Write the session history JSON and the markdown metaprompt.
   * @param {string} slug
   * @param {{templateKey:string, transcript:Array, answers:Object}} data
   * @returns {{metaprompt:string, skill:string, history:string}} Paths written.
   */
  saveMetaprompt(slug, data) {
    const paths = this.getPaths(slug);

    fs.mkdirSync(path.dirname(paths.metaprompt), { recursive: true });
    fs.mkdirSync(path.dirname(paths.history), { recursive: true });

    // 1. Raw session JSON (the "Metaprompt Artifact").
    fs.writeFileSync(paths.history, JSON.stringify(data, null, 2));

    // 2. Readable markdown metaprompt (source of truth).
    const transcriptMd = data.transcript
      .map((turn) => `**${turn.role.toUpperCase()}**: ${turn.content}`)
      .join('\n\n');
    const mdContent = [
      '---',
      `slug: ${slug}`,
      `template: ${data.templateKey}`,
      `date: ${new Date().toISOString()}`,
      '---',
      '',
      `# Metaprompt Session: ${slug}`,
      '',
      '## Transcript',
      transcriptMd,
      '',
      '## Filled Slots',
      '```json',
      JSON.stringify(data.answers, null, 2),
      '```',
      '',
    ].join('\n');
    fs.writeFileSync(paths.metaprompt, mdContent);

    return paths;
  }

  /**
   * Write the generated skill markdown file for a slug.
   * @param {string} slug
   * @param {string} systemPrompt - The finalized system prompt body.
   * @returns {string} Path of the written skill file.
   */
  saveSkill(slug, systemPrompt) {
    const paths = this.getPaths(slug);
    fs.mkdirSync(path.dirname(paths.skill), { recursive: true });

    const content = [
      '---',
      `title: ${slug} (Generated Agent)`,
      'description: Generated via sage interview',
      '---',
      '',
      systemPrompt,
      '',
    ].join('\n');
    fs.writeFileSync(paths.skill, content);
    return paths.skill;
  }

  /**
   * Append a link entry for the slug to prompts/agents.md, if the prompts
   * directory exists. Duplicate entries are skipped; read failures fall
   * back to treating the file as empty (best-effort by design).
   * @param {string} slug
   */
  appendToAgentsList(slug) {
    const agentsPath = path.join(this.projectDir, 'prompts', 'agents.md');
    if (!fs.existsSync(path.dirname(agentsPath))) return;

    const entry = `- [${slug}](./skills/${slug}.md)`;
    let current = '';
    if (fs.existsSync(agentsPath)) {
      try {
        current = fs.readFileSync(agentsPath, 'utf8');
        if (current.includes(entry)) return; // avoid duplicates
      } catch (_) {
        current = '';
      }
    }
    // Keep the list well-formed when the file lacks a trailing newline.
    const separator = current && !current.endsWith('\n') ? '\n' : '';
    fs.appendFileSync(agentsPath, `${separator}${entry}\n`);
  }
}

module.exports = MetapromptPersistence;
@@ -0,0 +1,186 @@
1
const templates = require('./templates.json');

/**
 * Mapping of slot groups to section names and order.
 * This normalizes the various group names used in templates.
 */
const GROUP_MAPPINGS = {
  // Identity-related
  'identity': 'identity',
  'role': 'identity',

  // Goals-related
  'goals': 'goals',
  'goal': 'goals',

  // Context-related
  'context': 'context',
  'stack': 'context',

  // Constraints-related
  'constraints': 'constraints',
  'guidelines': 'constraints',

  // Tools-related
  'tools': 'tools',

  // Output-related
  'output': 'output',
  'format': 'output',
};

/**
 * Tool definitions for inject_tools feature.
 * Maps tool names to descriptions.
 */
const TOOL_DEFINITIONS = {
  'sage prompts': '`sage prompts`: Workspace-first prompt and skill management (init, list, diff, publish).',
  'sage project': '`sage project`: Manifest management, IPFS publishing, and governance proposal helpers.',
  'sage governance': '`sage governance`: Governance commands for viewing proposals, voting, and managing SubDAOs.',
  'sage gov': '`sage gov`: Alias for sage governance - proposal management and voting.',
  'sage treasury': '`sage treasury`: Treasury inspection, withdrawals, boosts, and bond operations.',
  'sage ipfs': '`sage ipfs`: IPFS operations including pinning, fetching, and gateway management.',
  'sage dev': '`sage dev`: Development commands for building, testing, and deploying.',
  'sage config': '`sage config`: Configuration management for RPC, wallets, and AI providers.',
  'sage mcp': '`sage mcp`: MCP server management for agent integrations.',
};

/**
 * Assembles the final XML-structured system prompt from a template,
 * its slot definitions, and the user's answers.
 */
class PromptBuilder {
  constructor() {
    // Could be extended to load custom tool definitions
  }

  /**
   * Builds the final system prompt string from the filled slots.
   * @param {string} templateKey - Template id; unknown keys fall back to 'custom'.
   * @param {Array} slots - Definition of slots
   * @param {Object} answers - Key-value pairs of filled answers
   * @returns {string} The final XML-structured system prompt
   */
  buildSystemPrompt(templateKey, slots, answers) {
    const template = templates[templateKey] || templates['custom'];

    // Build sections in order
    const sections = [];

    // 1. Role/Identity section (explicit 'role' answer wins over grouped slots)
    const identityContent = this.#buildGroupSection(slots, answers, 'identity');
    const roleContent = this.#getAnswer(answers, 'role') || identityContent || template.description;
    if (roleContent) {
      sections.push(this.#buildSection('role', roleContent));
    }

    // 2. Goals section (explicit 'goal' answer wins over grouped slots)
    const goalsContent = this.#buildGroupSection(slots, answers, 'goals');
    const goalContent = this.#getAnswer(answers, 'goal') || goalsContent || template.default_goal;
    if (goalContent) {
      sections.push(this.#buildSection('goals', goalContent));
    }

    // 3. Context section (tech stack, data sources, etc.)
    const contextSection = this.#buildGroupSection(slots, answers, 'context');
    if (contextSection) {
      sections.push(this.#buildSection('context', contextSection));
    }

    // 4. Constraints section
    const constraintsSection = this.#buildGroupSection(slots, answers, 'constraints');
    if (constraintsSection) {
      sections.push(this.#buildSection('constraints', constraintsSection));
    }

    // 5. Tools section
    const toolsSection = this.#buildToolsSection(template.inject_tools, answers, slots);
    if (toolsSection) {
      sections.push(toolsSection);
    }

    // 6. Output format section
    const outputSection = this.#buildGroupSection(slots, answers, 'output');
    if (outputSection) {
      sections.push(this.#buildSection('output_format', outputSection));
    }

    // 7. Final reminder
    sections.push(`Remember: You are ${template.name}. Stay in character and follow your constraints.`);

    return sections.filter(Boolean).join('\n\n');
  }

  // Normalize a slot's group name: lowercase, map known aliases, and fall
  // back to the raw (lowercased) group for unmapped names. Shared by
  // #buildGroupSection and #buildToolsSection so both treat groups alike.
  #normalizeGroup(group) {
    const key = (group || '').toLowerCase();
    return GROUP_MAPPINGS[key] || key;
  }

  #getAnswer(answers, key) {
    return answers[key] || '';
  }

  // Wrap content in an XML-style tag derived from the section title.
  #buildSection(title, content) {
    if (!content) return '';
    const tag = this.#sanitizeTag(title);
    return `<${tag}>\n${content}\n</${tag}>`;
  }

  #sanitizeTag(title) {
    // Remove special chars, replace spaces with underscores, lowercase
    return title.toLowerCase().replace(/[^a-z0-9_]/g, '_').replace(/_+/g, '_');
  }

  /**
   * Build a section from all slots matching a normalized group.
   * Answered slots are rendered as "- **Label**: answer" lines.
   */
  #buildGroupSection(slots, answers, normalizedGroup) {
    // Find slots matching this group (handling group name variations)
    const groupSlots = slots.filter(slot => this.#normalizeGroup(slot.group) === normalizedGroup);

    if (!groupSlots.length) return '';

    const lines = groupSlots.map(slot => {
      const ans = answers[slot.key];
      if (!ans) return null;
      return `- **${slot.label}**: ${ans}`;
    }).filter(Boolean);

    return lines.length ? lines.join('\n') : '';
  }

  /**
   * Build the <tools> section from template-injected tools, tool-group
   * slot answers, and a generic 'tools' answer. Returns '' when empty.
   */
  #buildToolsSection(injectedTools = [], answers, slots) {
    const definitions = [];

    // Add injected tools from template
    for (const tool of injectedTools) {
      const def = TOOL_DEFINITIONS[tool];
      if (def) {
        definitions.push(`- ${def}`);
      } else {
        // Unknown tool - add a generic entry
        definitions.push(`- \`${tool}\`: Available tool (see documentation).`);
      }
    }

    // Add tools from slots with 'tools' group.
    // FIX: use the same normalization as #buildGroupSection; the previous
    // raw GROUP_MAPPINGS[s.group] lookup silently dropped mixed-case or
    // unmapped group names.
    const toolSlots = slots.filter(s => this.#normalizeGroup(s.group) === 'tools');
    for (const slot of toolSlots) {
      const ans = answers[slot.key];
      if (ans && !definitions.some(d => d.includes(ans))) {
        definitions.push(`- ${ans}`);
      }
    }

    // Add generic tool answer if provided
    if (answers.tools && !definitions.some(d => d.includes(answers.tools))) {
      definitions.push(`- ${answers.tools}`);
    }

    if (!definitions.length) return '';

    return `<tools>
You have access to the following tools and capabilities:
${definitions.join('\n')}

Use these tools when appropriate to fulfill the user's request.
</tools>`;
  }
}

module.exports = PromptBuilder;
@@ -1,32 +1,33 @@
1
+ const { createModelClient } = require('./model-client');
2
+
1
3
  class MetapromptSession {
2
4
  constructor(options = {}) {
3
5
  const {
4
6
  title,
5
7
  goal,
6
- targetModel = 'gpt-5',
7
8
  interviewStyle = 'one-question-at-a-time',
8
- apiKey,
9
9
  temperature = 0.7,
10
- axios,
11
- process,
12
- env,
13
- openAiModel = (env || process?.env || global.process.env).SAGE_METAPROMPT_MODEL || 'gpt-4o-mini',
10
+ process: proc,
11
+ config, // injected config dependency
12
+ provider, // optional override
13
+ model, // optional override
14
+ apiKey, // optional override
14
15
  } = options;
15
16
 
16
- this.axios = axios || require('axios');
17
- this.process = process || global.process;
18
- this.env = env || this.process.env;
17
+ this.process = proc || global.process;
18
+ this.config = config || require('../../config'); // fallback to global require if not passed
19
19
 
20
- if (!apiKey) {
21
- throw new Error('Missing OpenAI API key. Configure one with `sage config ai set --provider openai --key sk-...` or set OPENAI_API_KEY.');
20
+ // Initialize the ModelClient using the factory
21
+ try {
22
+ this.client = createModelClient(this.config, { provider, model, apiKey });
23
+ } catch (error) {
24
+ // If createModelClient fails (no keys), re-throw with the helpful message
25
+ throw error;
22
26
  }
23
27
 
24
28
  this.title = title || 'Untitled Metaprompt';
25
29
  this.goal = goal || 'Design a high quality system prompt.';
26
- this.targetModel = targetModel;
27
- this.openAiModel = openAiModel;
28
30
  this.interviewStyle = interviewStyle;
29
- this.apiKey = apiKey;
30
31
  this.temperature = temperature;
31
32
 
32
33
  this.messages = [
@@ -48,7 +49,6 @@ class MetapromptSession {
48
49
  'Before emitting FINAL_PROMPT, summarise the agreed requirements and confirm readiness.',
49
50
  'If the user types `/finish` or explicitly asks you to finalise, produce the FINAL_PROMPT immediately.',
50
51
  `Primary goal: ${this.goal}.`,
51
- `Target downstream model: ${this.targetModel}.`,
52
52
  `Interview cadence preference: ${this.interviewStyle}.`,
53
53
  'Guardrails: capture constraints, desired behaviours, failure modes, tone, tooling, fallback defaults, and evaluation hooks.',
54
54
  'When providing FINAL_PROMPT, include only the polished system prompt inside the fenced block (no commentary).',
@@ -66,55 +66,6 @@ class MetapromptSession {
66
66
  this.transcript.push({ role, content, timestamp: new Date().toISOString() });
67
67
  }
68
68
 
69
- async requestAssistantMessage(onToken) {
70
- const response = await this.axios.post(
71
- 'https://api.openai.com/v1/chat/completions',
72
- {
73
- model: this.openAiModel,
74
- messages: this.messages,
75
- temperature: this.temperature,
76
- stream: true,
77
- },
78
- {
79
- headers: {
80
- Authorization: `Bearer ${this.apiKey}`,
81
- 'Content-Type': 'application/json',
82
- },
83
- responseType: 'stream',
84
- }
85
- );
86
-
87
- return new Promise((resolve, reject) => {
88
- let buffer = '';
89
- let fullText = '';
90
- response.data.on('data', (chunk) => {
91
- buffer += chunk.toString('utf8');
92
- const lines = buffer.split('\n');
93
- buffer = lines.pop() || '';
94
- for (const line of lines) {
95
- const trimmed = line.trim();
96
- if (!trimmed.startsWith('data:')) continue;
97
- const payload = trimmed.slice(5).trim();
98
- if (!payload || payload === '[DONE]') continue;
99
- try {
100
- const parsed = JSON.parse(payload);
101
- const delta = parsed.choices?.[0]?.delta || {};
102
- if (typeof delta.content === 'string' && delta.content.length) {
103
- fullText += delta.content;
104
- if (typeof onToken === 'function') onToken(delta.content);
105
- }
106
- if (delta.role === 'assistant' && typeof onToken === 'function') {
107
- onToken('');
108
- }
109
- } catch (_) {}
110
- }
111
- });
112
-
113
- response.data.on('end', () => resolve(fullText.trim()));
114
- response.data.on('error', (error) => reject(error));
115
- });
116
- }
117
-
118
69
  extractFinalPrompt(text) {
119
70
  if (!text) return null;
120
71
  const match = /FINAL_PROMPT\s*```(?:[a-zA-Z0-9_-]+\n)?([\s\S]*?)```/i.exec(text);
@@ -131,24 +82,13 @@ class MetapromptSession {
131
82
  this.#appendTranscript('user', this.messages[1].content);
132
83
 
133
84
  while (!this.finalPrompt) {
134
- let assistantMessage;
85
+ let assistantMessage = '';
135
86
  try {
136
- assistantMessage = await this.requestAssistantMessage((token) => {
87
+ assistantMessage = await this.client.streamChat(this.messages, (token) => {
137
88
  if (token) printAssistant(token);
138
89
  });
139
90
  } catch (error) {
140
- const status = error?.response?.status;
141
- let detail = error?.response?.data?.error?.message;
142
- if (!detail && error?.response?.data) {
143
- try {
144
- detail = JSON.stringify(error.response.data);
145
- } catch (_) {}
146
- }
147
- if (!detail) detail = error.message;
148
- const hint = detail && detail.includes('model')
149
- ? ' (hint: set SAGE_METAPROMPT_MODEL or choose a supported OpenAI model)'
150
- : '';
151
- throw new Error(status ? `OpenAI error (${status}): ${detail}${hint}` : `OpenAI error: ${detail}${hint}`);
91
+ throw new Error(`AI Model error: ${error.message}`);
152
92
  }
153
93
 
154
94
  if (assistantMessage) {
@@ -192,8 +132,6 @@ class MetapromptSession {
192
132
  finalAssistantMessage: this.finalAssistantMessage,
193
133
  transcript: this.transcript,
194
134
  messages: this.messages,
195
- targetModel: this.targetModel,
196
- openAiModel: this.openAiModel,
197
135
  };
198
136
  }
199
137
  }
@@ -0,0 +1,115 @@
1
const { createModelClient } = require('./model-client');
const templates = require('./templates.json');

/**
 * Plans the set of information "slots" (interview questions) needed to
 * build a persona. Standard templates use their static recommended slots;
 * the 'custom' template asks an LLM to design the plan.
 */
class SlotPlanner {
  /**
   * @param {object} config CLI config object
   * @param {object} [options]
   * @param {object} [options.client] optional shared ModelClient instance
   * @param {string} [options.provider]
   * @param {string} [options.model]
   * @param {string} [options.apiKey]
   */
  constructor(config, options = {}) {
    this.config = config;
    this.overrides = {
      provider: options.provider,
      model: options.model,
      apiKey: options.apiKey,
    };
    // Lazily created in planSlots when not injected.
    this.client = options.client || null;
  }

  /**
   * Generates a plan of slots (information requirements) for the persona.
   * Falls back to the template's recommended slots on any LLM or parse
   * failure; every return path yields a priority-sorted copy so callers
   * can never mutate the shared templates.json data.
   * @param {string} templateKey - The key of the template (e.g., 'coding-assistant').
   * @param {string} initialDescription - The user's initial input/goal.
   * @param {string[]} [contextTags] - Optional tags to guide planning.
   * @returns {Promise<Array>} Array of slot objects.
   */
  async planSlots(templateKey, initialDescription, contextTags = []) {
    const template = templates[templateKey] || templates['custom'];
    const recommended = template.recommended_slots || [];

    // For standard personas, default to static recommended slots to avoid
    // unnecessary LLM calls. The planner is used only for 'custom'.
    if (templateKey !== 'custom') {
      return this.#sortedCopy(recommended);
    }

    // System prompt for the planner LLM
    const systemPrompt = `
You are an expert Metaprompt Architect. Your goal is to design a structured "Slot Plan" for a new AI Persona.
A "Slot" is a specific piece of information we need to collect from the user to build a high-quality system prompt.

Template Context: ${template.name} - ${template.description}
Planner Instructions: ${template.planner_instructions}

Existing Recommended Slots:
${JSON.stringify(recommended, null, 2)}

User's Initial Description: "${initialDescription}"
Context Tags: ${contextTags.join(', ')}

Your Task:
1. Analyze the User's Description and the Template Instructions.
2. Determine if the Recommended Slots are sufficient, or if new custom slots are needed.
3. If the user has *already* provided information for a slot in their description, you should still list the slot but mark it as potentially filled (this logic happens later, just define the schema now).
4. Return a JSON array of Slot objects.

Slot Schema:
{
  "key": "snake_case_key",
  "label": "Human Readable Label",
  "description": "Question/Prompt to ask the user to fill this slot",
  "required": boolean,
  "priority": number (1=Critical, 5=Nice to have),
  "group": "identity" | "context" | "constraints" | "tools" | "output"
}

CRITICAL: Return ONLY the JSON array. No markdown formatting, no code blocks.
`;

    const messages = [
      { role: 'system', content: systemPrompt.trim() },
      { role: 'user', content: 'Generate the slot plan.' },
    ];

    try {
      if (!this.client) {
        this.client = createModelClient(this.config, this.overrides);
      }
      const response = await this.client.complete(messages);

      // Strip markdown code fences the model may add despite instructions.
      const cleanJson = response.replace(/```json/g, '').replace(/```/g, '').trim();

      let plan;
      try {
        plan = JSON.parse(cleanJson);
      } catch (e) {
        // Fallback: if JSON parse fails, just use recommended slots
        console.warn('SlotPlanner: Failed to parse LLM response, using defaults.', e.message);
        return this.#sortedCopy(recommended);
      }

      // FIX: a non-array payload (e.g. an object wrapping the slots) used
      // to throw at plan.find below; treat it as a parse failure instead.
      if (!Array.isArray(plan)) {
        console.warn('SlotPlanner: LLM response was not a slot array, using defaults.');
        return this.#sortedCopy(recommended);
      }

      // Merge recommended slots if they aren't present (or if LLM hallucinated them away)
      // Ideally the LLM includes them, but let's ensure critical ones exist.
      for (const rec of recommended) {
        if (!plan.find((p) => p.key === rec.key)) {
          plan.push(rec);
        }
      }

      // Sort by priority (plan is our own parsed array, safe to sort in place)
      return plan.sort((a, b) => a.priority - b.priority);
    } catch (error) {
      console.error('SlotPlanner Error:', error);
      // Fallback to recommended
      return this.#sortedCopy(recommended);
    }
  }

  // Priority-sorted copy. FIX: fallback paths previously returned the
  // module-level recommended_slots array by reference (and unsorted),
  // letting callers mutate the in-memory template data.
  #sortedCopy(slots) {
    return slots.slice().sort((a, b) => a.priority - b.priority);
  }
}

module.exports = SlotPlanner;