@operor/cli 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62)
  1. package/README.md +76 -0
  2. package/dist/config-Bn2pbORi.js +34 -0
  3. package/dist/config-Bn2pbORi.js.map +1 -0
  4. package/dist/converse-C_PB7-JH.js +142 -0
  5. package/dist/converse-C_PB7-JH.js.map +1 -0
  6. package/dist/doctor-98gPl743.js +122 -0
  7. package/dist/doctor-98gPl743.js.map +1 -0
  8. package/dist/index.d.ts +1 -0
  9. package/dist/index.js +2268 -0
  10. package/dist/index.js.map +1 -0
  11. package/dist/llm-override-BIQl0V6H.js +445 -0
  12. package/dist/llm-override-BIQl0V6H.js.map +1 -0
  13. package/dist/reset-DT8SBgFS.js +87 -0
  14. package/dist/reset-DT8SBgFS.js.map +1 -0
  15. package/dist/simulate-BKv62GJc.js +144 -0
  16. package/dist/simulate-BKv62GJc.js.map +1 -0
  17. package/dist/status-D6LIZvQa.js +82 -0
  18. package/dist/status-D6LIZvQa.js.map +1 -0
  19. package/dist/test-DYjkxbtK.js +177 -0
  20. package/dist/test-DYjkxbtK.js.map +1 -0
  21. package/dist/test-suite-D8H_5uKs.js +209 -0
  22. package/dist/test-suite-D8H_5uKs.js.map +1 -0
  23. package/dist/utils-BuV4q7f6.js +11 -0
  24. package/dist/utils-BuV4q7f6.js.map +1 -0
  25. package/dist/vibe-Bl_js3Jo.js +395 -0
  26. package/dist/vibe-Bl_js3Jo.js.map +1 -0
  27. package/package.json +43 -0
  28. package/src/commands/analytics.ts +408 -0
  29. package/src/commands/chat.ts +310 -0
  30. package/src/commands/config.ts +34 -0
  31. package/src/commands/converse.ts +182 -0
  32. package/src/commands/doctor.ts +154 -0
  33. package/src/commands/history.ts +60 -0
  34. package/src/commands/init.ts +163 -0
  35. package/src/commands/kb.ts +429 -0
  36. package/src/commands/llm-override.ts +480 -0
  37. package/src/commands/reset.ts +72 -0
  38. package/src/commands/simulate.ts +187 -0
  39. package/src/commands/status.ts +112 -0
  40. package/src/commands/test-suite.ts +247 -0
  41. package/src/commands/test.ts +177 -0
  42. package/src/commands/vibe.ts +478 -0
  43. package/src/config.ts +127 -0
  44. package/src/index.ts +190 -0
  45. package/src/log-timestamps.ts +26 -0
  46. package/src/setup.ts +712 -0
  47. package/src/start.ts +573 -0
  48. package/src/utils.ts +6 -0
  49. package/templates/agents/_defaults/SOUL.md +20 -0
  50. package/templates/agents/_defaults/USER.md +16 -0
  51. package/templates/agents/customer-support/IDENTITY.md +6 -0
  52. package/templates/agents/customer-support/INSTRUCTIONS.md +79 -0
  53. package/templates/agents/customer-support/SOUL.md +26 -0
  54. package/templates/agents/faq-bot/IDENTITY.md +6 -0
  55. package/templates/agents/faq-bot/INSTRUCTIONS.md +53 -0
  56. package/templates/agents/faq-bot/SOUL.md +19 -0
  57. package/templates/agents/sales/IDENTITY.md +6 -0
  58. package/templates/agents/sales/INSTRUCTIONS.md +67 -0
  59. package/templates/agents/sales/SOUL.md +20 -0
  60. package/tsconfig.json +9 -0
  61. package/tsdown.config.ts +13 -0
  62. package/vitest.config.ts +8 -0
package/src/commands/test.ts
@@ -0,0 +1,177 @@
+ import { readConfig } from '../config.js';
+ import { writeFileSync } from 'fs';
+ import { formatTimestamp } from '../utils.js';
+
+ export async function runTest(options: {
+   csv?: string;
+   tag?: string;
+   report?: string;
+   real?: boolean;
+   allowWrites?: boolean;
+   dryRun?: boolean;
+ } = {}): Promise<void> {
+   const config = readConfig();
+
+   const { Operor } = await import('@operor/core');
+   const { MockProvider } = await import('@operor/provider-mock');
+   const { MockShopifySkill } = await import('@operor/testing');
+
+   // If --csv is provided, run CSV test suite mode
+   if (options.csv) {
+     const { CSVLoader, TestSuiteRunner, SkillTestHarness } = await import('@operor/testing');
+
+     const os = new Operor({ debug: false, batchWindowMs: 0 });
+     const provider = new MockProvider();
+     const shopify = new MockShopifySkill();
+
+     // Wrap skill with safety harness if --real or --dry-run
+     let skill: any = shopify;
+     if (options.real || options.dryRun) {
+       skill = new SkillTestHarness(shopify, {
+         allowWrites: options.allowWrites ?? false,
+         dryRun: options.dryRun ?? false,
+       });
+       await skill.authenticate();
+     }
+
+     await os.addProvider(provider);
+     await os.addSkill(skill);
+
+     os.createAgent({
+       name: 'Test Agent',
+       purpose: 'Handle customer support conversations',
+       personality: 'empathetic and solution-focused',
+       triggers: ['order_tracking', 'general'],
+       tools: [shopify.tools.get_order, shopify.tools.create_discount, shopify.tools.search_products],
+       rules: [{
+         name: 'Auto-compensation',
+         condition: async (_ctx: any, toolResults: any[]) => {
+           const order = toolResults.find((t) => t.name === 'get_order');
+           return order?.success && order.result?.isDelayed && order.result?.delayDays >= 2;
+         },
+         action: async () => {
+           const discount = await shopify.tools.create_discount.execute({ percent: 10, validDays: 30 });
+           return { type: 'discount_created', code: discount.code, percent: 10, validDays: 30 };
+         },
+       }],
+     });
+
+     await os.start();
+
+     let testCases = await CSVLoader.fromFile(options.csv);
+
+     // Filter by tag if specified
+     if (options.tag) {
+       testCases = testCases.filter((tc) =>
+         tc.tags?.some((t) => t.toLowerCase() === options.tag!.toLowerCase())
+       );
+       if (testCases.length === 0) {
+         console.error(`No test cases found with tag "${options.tag}"`);
+         process.exit(1);
+       }
+     }
+
+     console.log(`\n Running ${testCases.length} test case(s) from ${options.csv}...\n`);
+
+     // Set up LLM if available for evaluation
+     let llm: any;
+     if (config.LLM_PROVIDER && config.LLM_API_KEY) {
+       const { AIProvider } = await import('@operor/llm');
+       llm = new AIProvider({
+         provider: config.LLM_PROVIDER as any,
+         apiKey: config.LLM_API_KEY,
+         model: config.LLM_MODEL,
+       });
+     }
+
+     const suiteRunner = new TestSuiteRunner({
+       agentOS: os,
+       llm,
+     });
+
+     const result = await suiteRunner.runSuite(testCases);
+     await os.stop();
+
+     // Display results
+     for (const r of result.results) {
+       const icon = r.evaluation.passed ? '✓' : '✗';
+       const status = r.evaluation.passed ? 'PASS' : 'FAIL';
+       console.log(` ${icon} [${formatTimestamp()}] [${status}] ${r.testCase.id}: ${r.testCase.question}`);
+       if (!r.evaluation.passed) {
+         console.log(` Score: ${r.evaluation.score.toFixed(2)} — ${r.evaluation.reasoning}`);
+       }
+     }
+
+     console.log(`\n Results: ${result.passed}/${result.total} passed (avg score: ${result.averageScore.toFixed(2)})`);
+     console.log(` Duration: ${(result.totalDuration / 1000).toFixed(1)}s`);
+     console.log(` Status: ${result.failed === 0 ? '✓ ALL PASSED' : '✗ SOME FAILED'}\n`);
+
+     if (options.report) {
+       writeFileSync(options.report, JSON.stringify(result, null, 2), 'utf-8');
+       console.log(` Report saved to ${options.report}\n`);
+     }
+
+     process.exit(result.failed === 0 ? 0 : 1);
+     return;
+   }
+
+   // Default: run built-in demo scenarios
+   console.log('\n Running Operor test scenarios...\n');
+
+   const os = new Operor({ debug: true, batchWindowMs: 0 });
+   const provider = new MockProvider();
+   const shopify = new MockShopifySkill();
+
+   await os.addProvider(provider);
+   await os.addSkill(shopify);
+
+   os.createAgent({
+     name: 'Test Agent',
+     purpose: 'Validate agent processing pipeline',
+     personality: 'helpful',
+     triggers: ['order_tracking', 'general'],
+     tools: [shopify.tools.get_order, shopify.tools.create_discount],
+     rules: [{
+       name: 'Auto-compensation',
+       condition: async (_ctx: any, toolResults: any[]) => {
+         const order = toolResults.find(t => t.name === 'get_order');
+         return order?.success && order.result?.isDelayed && order.result?.delayDays >= 2;
+       },
+       action: async () => {
+         const discount = await shopify.tools.create_discount.execute({ percent: 10, validDays: 30 });
+         return { type: 'discount_created', code: discount.code, percent: 10, validDays: 30 };
+       },
+     }],
+   });
+
+   let processed = 0;
+   const total = 4;
+   const done = new Promise<void>(resolve => {
+     os.on('message:processed', () => { if (++processed >= total) resolve(); });
+   });
+
+   await os.start();
+
+   const scenarios = [
+     { phone: '+1', msg: 'Where is my order #12345?', label: 'Delayed order (auto-compensation)' },
+     { phone: '+2', msg: 'Check order #67890', label: 'On-time order' },
+     { phone: '+3', msg: 'Where is order #99999?', label: 'Order not found' },
+     { phone: '+4', msg: 'Hello!', label: 'General greeting' },
+   ];
+
+   for (const s of scenarios) {
+     console.log(` [${formatTimestamp()}] Test: ${s.label}`);
+     provider.simulateIncomingMessage(s.phone, s.msg);
+     await new Promise(r => setTimeout(r, 100));
+   }
+
+   await done;
+   await os.stop();
+
+   const discounts = shopify.getDiscounts();
+   console.log(`\n Results: ${processed}/${total} messages processed`);
+   console.log(` Discounts created: ${discounts.length}`);
+   console.log(` Status: ${processed === total ? '✓ PASS' : '✗ FAIL'}\n`);
+
+   process.exit(processed === total ? 0 : 1);
+ }
package/src/commands/vibe.ts
@@ -0,0 +1,478 @@
+ import * as readline from 'node:readline';
+ import * as fs from 'node:fs';
+ import * as path from 'node:path';
+ import { readConfig } from '../config.js';
+
+ interface VibeOptions {
+   action?: string;
+   agent?: string;
+ }
+
+ interface LLMMessage {
+   role: 'system' | 'user' | 'assistant';
+   content: string;
+ }
+
+ const WELCOME = `
+ Welcome to Operor Vibe -- your agent copilot!
+
+ I can help you with:
+
+ 1. Create     Create a new agent from a description
+ 2. Customize  Update an agent's instructions and behavior
+ 3. Connect    Add skills, knowledge sources, and tools
+ 4. Improve    Refine responses based on feedback
+
+ What would you like to do?
+ `;
+
+ const AGENTS_DIR = path.resolve(process.cwd(), 'agents');
+
+ const META_PROMPT = `You are Operor Vibe, an AI copilot that helps users create and customize AI agents.
+
+ You generate file-based agent definitions for the Operor framework. Each agent lives in an agents/<name>/ directory with these files:
+
+ ## INSTRUCTIONS.md Format
+
+ YAML frontmatter followed by markdown body:
+
+ \`\`\`
+ ---
+ name: <agent-slug>
+ purpose: <one-line description>
+ triggers:
+   - <intent keywords that route to this agent>
+ channels: # optional — omit to handle all channels
+   - whatsapp
+   - telegram
+   - wati
+ skills: # optional — configured via mcp.json
+   - shopify
+   - stripe
+   - github
+ knowledgeBase: false # set true to enable KB retrieval
+ guardrails: # optional
+   maxResponseLength: 1500
+   blockedTopics: []
+   escalationTriggers: []
+   systemRules: []
+ ---
+
+ ## Role
+ <What this agent does and who it serves>
+
+ ## Scope
+ <Bullet list of what the agent CAN do>
+
+ ## Out of Scope
+ <Bullet list of what the agent should NOT do>
+
+ ## Style
+ <Communication style guidelines>
+
+ ## Escalation
+ <When and how to hand off to humans>
+
+ ## Examples
+ <2-3 example user/agent exchanges>
+ \`\`\`
+
+ ## IDENTITY.md Format
+ Short personality card:
+ - Name, Emoji, Nature, Vibe
+
+ ## SOUL.md Format
+ Deeper behavioral guide:
+ - Core Principles, Boundaries, Tone, When Things Go Wrong
+
+ ## Available skills
+ configured via mcp.json (e.g. shopify, stripe, github)
+
+ ## Available channels
+ whatsapp, telegram, wati
+
+ ## Your behavior
+ - Be conversational and friendly
+ - When the user describes what they want, ask 2-3 clarifying questions naturally before generating
+ - Questions should cover: target audience, tone/personality, key capabilities, skills needed
+ - When you have enough info, generate all three files
+ - Output each file clearly marked with these exact delimiters:
+   --- FILE: INSTRUCTIONS.md ---
+   (file content)
+   --- FILE: IDENTITY.md ---
+   (file content)
+   --- FILE: SOUL.md ---
+   (file content)
+ - After generating, summarize what you created`;
+
+ const CUSTOMIZE_PROMPT = `You are Operor Vibe, helping the user customize an existing agent.
+
+ The user will provide feedback about their agent. You have access to the current INSTRUCTIONS.md content.
+
+ - Ask clarifying questions if the feedback is vague
+ - When ready to apply changes, output the updated file with:
+   --- FILE: INSTRUCTIONS.md ---
+   (full updated content)
+ - Explain what you changed`;
+
+ const CONNECT_PROMPT = `You are Operor Vibe, helping the user connect skills and knowledge sources to an agent.
+
+ Available skills: configured via mcp.json (e.g. shopify, stripe, github)
+ Available channels: whatsapp, telegram, wati
+
+ The user will tell you what to add. You have access to the current INSTRUCTIONS.md content.
+
+ - Update the YAML frontmatter to add the requested skills/channels
+ - Output the updated file with:
+   --- FILE: INSTRUCTIONS.md ---
+   (full updated content)
+ - Explain what you connected`;
+
+ const IMPROVE_PROMPT = `You are Operor Vibe, helping the user improve an agent's responses.
+
+ The user will provide examples of bad responses and what they'd prefer instead.
+ You have access to the current INSTRUCTIONS.md content.
+
+ - Analyze the gap between actual and desired behavior
+ - Update the Examples section and/or Style section to address the issue
+ - Output the updated file with:
+   --- FILE: INSTRUCTIONS.md ---
+   (full updated content)
+ - Explain what you improved`;
+
+ function createLLM(config: ReturnType<typeof readConfig>) {
+   // Lazy import to avoid loading AI SDK at CLI startup
+   return import('@operor/llm').then(({ AIProvider }) => {
+     const maxTokens = config.VIBE_MAX_TOKENS ? Number(config.VIBE_MAX_TOKENS) : 4000;
+     return new AIProvider({
+       provider: config.LLM_PROVIDER as 'openai' | 'anthropic' | 'google' | 'groq' | 'ollama',
+       apiKey: config.LLM_API_KEY,
+       model: config.LLM_MODEL,
+       maxTokens,
+     });
+   });
+ }
+
+ function listAgents(): string[] {
+   if (!fs.existsSync(AGENTS_DIR)) return [];
+   return fs.readdirSync(AGENTS_DIR, { withFileTypes: true })
+     .filter(e => e.isDirectory() && !e.name.startsWith('_'))
+     .map(e => e.name);
+ }
+
+ function readAgentFile(agentName: string, fileName: string): string | null {
+   const filePath = path.join(AGENTS_DIR, agentName, fileName);
+   if (!fs.existsSync(filePath)) return null;
+   return fs.readFileSync(filePath, 'utf-8');
+ }
+
+ function writeAgentFile(agentName: string, fileName: string, content: string): void {
+   const dir = path.join(AGENTS_DIR, agentName);
+   fs.mkdirSync(dir, { recursive: true });
+   fs.writeFileSync(path.join(dir, fileName), content, 'utf-8');
+ }
+
+ export function parseGeneratedFiles(text: string): Record<string, string> {
+   const files: Record<string, string> = {};
+   const pattern = /---\s*FILE:\s*(\S+)\s*---\n([\s\S]*?)(?=---\s*FILE:|$)/g;
+   let match: RegExpExecArray | null;
+   while ((match = pattern.exec(text)) !== null) {
+     const name = match[1].trim();
+     const content = match[2].trim();
+     if (name && content) files[name] = content;
+   }
+   return files;
+ }
+
+ export function extractAgentName(files: Record<string, string>): string {
+   const instructions = files['INSTRUCTIONS.md'] || '';
+   const nameMatch = instructions.match(/^name:\s*(.+)$/m);
+   if (nameMatch) return nameMatch[1].trim();
+   return 'new-agent';
+ }
+
+ function prompt(rl: readline.Interface, question: string): Promise<string> {
+   return new Promise((resolve) => {
+     rl.question(question, (answer) => resolve(answer.trim()));
+   });
+ }
+
+ async function conversationLoop(
+   rl: readline.Interface,
+   llm: { complete: (msgs: LLMMessage[], opts?: any) => Promise<{ text: string }> },
+   systemPrompt: string,
+   initialUserMessage?: string,
+   maxTokens = 4000,
+ ): Promise<{ history: LLMMessage[]; lastResponse: string }> {
+   const history: LLMMessage[] = [{ role: 'system', content: systemPrompt }];
+
+   if (initialUserMessage) {
+     history.push({ role: 'user', content: initialUserMessage });
+     process.stdout.write('\n Thinking...');
+     const response = await llm.complete(history, { maxTokens });
+     process.stdout.write('\r' + ' '.repeat(20) + '\r');
+     history.push({ role: 'assistant', content: response.text });
+     console.log(`\n Vibe: ${response.text}\n`);
+
+     // Check if files were generated
+     const files = parseGeneratedFiles(response.text);
+     if (Object.keys(files).length > 0) {
+       return { history, lastResponse: response.text };
+     }
+   }
+
+   // Continue conversation
+   while (true) {
+     const input = await prompt(rl, ' You: ');
+     if (!input || input === 'exit' || input === 'quit' || input === 'done') {
+       return { history, lastResponse: history[history.length - 1]?.content || '' };
+     }
+
+     history.push({ role: 'user', content: input });
+     process.stdout.write(' Thinking...');
+     const response = await llm.complete(history, { maxTokens });
+     process.stdout.write('\r' + ' '.repeat(20) + '\r');
+     history.push({ role: 'assistant', content: response.text });
+     console.log(`\n Vibe: ${response.text}\n`);
+
+     // Check if files were generated in this turn
+     const files = parseGeneratedFiles(response.text);
+     if (Object.keys(files).length > 0) {
+       return { history, lastResponse: response.text };
+     }
+   }
+ }
+
+ async function handleCreate(
+   rl: readline.Interface,
+   llm: { complete: (msgs: LLMMessage[], opts?: any) => Promise<{ text: string }> },
+   maxTokens: number,
+ ): Promise<void> {
+   console.log('\n Describe the agent you want to create:\n');
+   const description = await prompt(rl, ' You: ');
+   if (!description) return;
+
+   const { lastResponse } = await conversationLoop(rl, llm, META_PROMPT, description, maxTokens);
+   const files = parseGeneratedFiles(lastResponse);
+
+   if (Object.keys(files).length === 0) {
+     console.log('\n No agent files were generated. Try again with more detail.\n');
+     return;
+   }
+
+   const agentName = extractAgentName(files);
+   console.log(`\n Writing agent files to agents/${agentName}/\n`);
+
+   for (const [fileName, content] of Object.entries(files)) {
+     writeAgentFile(agentName, fileName, content);
+     console.log(` + agents/${agentName}/${fileName}`);
+   }
+
+   console.log(`\n Agent "${agentName}" created! Run "operor start" to use it.\n`);
+ }
+
+ async function handleCustomize(
+   rl: readline.Interface,
+   llm: { complete: (msgs: LLMMessage[], opts?: any) => Promise<{ text: string }> },
+   maxTokens: number,
+   targetAgent?: string,
+ ): Promise<void> {
+   const agents = listAgents();
+   if (agents.length === 0) {
+     console.log('\n No agents found. Create one first with "operor vibe create".\n');
+     return;
+   }
+
+   let agentName = targetAgent;
+   if (!agentName) {
+     console.log('\n Available agents:\n');
+     agents.forEach((a, i) => console.log(` ${i + 1}. ${a}`));
+     const choice = await prompt(rl, '\n Select an agent (name or number): ');
+     const idx = parseInt(choice, 10);
+     agentName = idx > 0 && idx <= agents.length ? agents[idx - 1] : choice;
+   }
+
+   const instructions = readAgentFile(agentName, 'INSTRUCTIONS.md');
+   if (!instructions) {
+     console.log(`\n Agent "${agentName}" not found or has no INSTRUCTIONS.md.\n`);
+     return;
+   }
+
+   console.log(`\n Customizing agent "${agentName}". What would you like to change?\n`);
+   const feedback = await prompt(rl, ' You: ');
+   if (!feedback) return;
+
+   const contextMessage = `Current INSTRUCTIONS.md for agent "${agentName}":\n\n${instructions}\n\nUser feedback: ${feedback}`;
+   const { lastResponse } = await conversationLoop(rl, llm, CUSTOMIZE_PROMPT, contextMessage, maxTokens);
+   const files = parseGeneratedFiles(lastResponse);
+
+   if (files['INSTRUCTIONS.md']) {
+     writeAgentFile(agentName, 'INSTRUCTIONS.md', files['INSTRUCTIONS.md']);
+     console.log(`\n Updated agents/${agentName}/INSTRUCTIONS.md\n`);
+   } else {
+     console.log('\n No changes generated.\n');
+   }
+ }
+
+ async function handleConnect(
+   rl: readline.Interface,
+   llm: { complete: (msgs: LLMMessage[], opts?: any) => Promise<{ text: string }> },
+   maxTokens: number,
+   targetAgent?: string,
+ ): Promise<void> {
+   const agents = listAgents();
+   if (agents.length === 0) {
+     console.log('\n No agents found. Create one first with "operor vibe create".\n');
+     return;
+   }
+
+   let agentName = targetAgent;
+   if (!agentName) {
+     console.log('\n Available agents:\n');
+     agents.forEach((a, i) => console.log(` ${i + 1}. ${a}`));
+     const choice = await prompt(rl, '\n Select an agent (name or number): ');
+     const idx = parseInt(choice, 10);
+     agentName = idx > 0 && idx <= agents.length ? agents[idx - 1] : choice;
+   }
+
+   const instructions = readAgentFile(agentName, 'INSTRUCTIONS.md');
+   if (!instructions) {
+     console.log(`\n Agent "${agentName}" not found or has no INSTRUCTIONS.md.\n`);
+     return;
+   }
+
+   console.log(`\n What would you like to connect to "${agentName}"?`);
+   console.log(' (skills: configured via mcp.json, e.g. shopify, stripe, github)');
+   console.log(' (channels: whatsapp, telegram, wati)\n');
+   const request = await prompt(rl, ' You: ');
+   if (!request) return;
+
+   const contextMessage = `Current INSTRUCTIONS.md for agent "${agentName}":\n\n${instructions}\n\nUser request: ${request}`;
+   const { lastResponse } = await conversationLoop(rl, llm, CONNECT_PROMPT, contextMessage, maxTokens);
+   const files = parseGeneratedFiles(lastResponse);
+
+   if (files['INSTRUCTIONS.md']) {
+     writeAgentFile(agentName, 'INSTRUCTIONS.md', files['INSTRUCTIONS.md']);
+     console.log(`\n Updated agents/${agentName}/INSTRUCTIONS.md\n`);
+   } else {
+     console.log('\n No changes generated.\n');
+   }
+ }
+
+ async function handleImprove(
+   rl: readline.Interface,
+   llm: { complete: (msgs: LLMMessage[], opts?: any) => Promise<{ text: string }> },
+   maxTokens: number,
+   targetAgent?: string,
+ ): Promise<void> {
+   const agents = listAgents();
+   if (agents.length === 0) {
+     console.log('\n No agents found. Create one first with "operor vibe create".\n');
+     return;
+   }
+
+   let agentName = targetAgent;
+   if (!agentName) {
+     console.log('\n Available agents:\n');
+     agents.forEach((a, i) => console.log(` ${i + 1}. ${a}`));
+     const choice = await prompt(rl, '\n Select an agent (name or number): ');
+     const idx = parseInt(choice, 10);
+     agentName = idx > 0 && idx <= agents.length ? agents[idx - 1] : choice;
+   }
+
+   const instructions = readAgentFile(agentName, 'INSTRUCTIONS.md');
+   if (!instructions) {
+     console.log(`\n Agent "${agentName}" not found or has no INSTRUCTIONS.md.\n`);
+     return;
+   }
+
+   console.log(`\n Improving agent "${agentName}".`);
+   console.log(' Share an example of a bad response and what you\'d prefer:\n');
+   const feedback = await prompt(rl, ' You: ');
+   if (!feedback) return;
+
+   const contextMessage = `Current INSTRUCTIONS.md for agent "${agentName}":\n\n${instructions}\n\nUser feedback on response quality: ${feedback}`;
+   const { lastResponse } = await conversationLoop(rl, llm, IMPROVE_PROMPT, contextMessage, maxTokens);
+   const files = parseGeneratedFiles(lastResponse);
+
+   if (files['INSTRUCTIONS.md']) {
+     writeAgentFile(agentName, 'INSTRUCTIONS.md', files['INSTRUCTIONS.md']);
+     console.log(`\n Updated agents/${agentName}/INSTRUCTIONS.md\n`);
+   } else {
+     console.log('\n No changes generated.\n');
+   }
+ }
+
+ function resolveAction(input: string): string | null {
+   const normalized = input.toLowerCase().trim();
+   if (['1', 'create'].includes(normalized)) return 'create';
+   if (['2', 'customize'].includes(normalized)) return 'customize';
+   if (['3', 'connect'].includes(normalized)) return 'connect';
+   if (['4', 'improve'].includes(normalized)) return 'improve';
+   return null;
+ }
+
+ export async function runVibe(opts: VibeOptions): Promise<void> {
+   const config = readConfig();
+
+   if (!config.LLM_PROVIDER || !config.LLM_API_KEY) {
+     console.error('\n LLM not configured. Run "operor setup" first to configure your LLM provider.\n');
+     process.exit(1);
+   }
+
+   let llm: Awaited<ReturnType<typeof createLLM>>;
+   try {
+     llm = await createLLM(config);
+   } catch (err: any) {
+     console.error(`\n Failed to initialize LLM: ${err.message}\n`);
+     process.exit(1);
+   }
+
+   const maxTokens = config.VIBE_MAX_TOKENS ? Number(config.VIBE_MAX_TOKENS) : 4000;
+
+   const rl = readline.createInterface({
+     input: process.stdin,
+     output: process.stdout,
+   });
+
+   // Graceful Ctrl+C
+   rl.on('close', () => {
+     console.log('\n Goodbye!\n');
+     process.exit(0);
+   });
+
+   let action = opts.action ? resolveAction(opts.action) : null;
+
+   if (!action) {
+     console.log(WELCOME);
+     const choice = await prompt(rl, ' > ');
+     action = resolveAction(choice);
+     if (!action) {
+       console.log('\n Invalid choice. Use: create, customize, connect, or improve.\n');
+       rl.close();
+       return;
+     }
+   }
+
+   try {
+     switch (action) {
+       case 'create':
+         await handleCreate(rl, llm, maxTokens);
+         break;
+       case 'customize':
+         await handleCustomize(rl, llm, maxTokens, opts.agent);
+         break;
+       case 'connect':
+         await handleConnect(rl, llm, maxTokens, opts.agent);
+         break;
+       case 'improve':
+         await handleImprove(rl, llm, maxTokens, opts.agent);
+         break;
+     }
+   } catch (err: any) {
+     if (err.code === 'ERR_USE_AFTER_CLOSE') return; // readline closed via Ctrl+C
+     console.error(`\n Error: ${err.message}\n`);
+   }
+
+   rl.close();
+ }
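parseGeneratedFiles and extractAgentName are the module's only exports, so their behavior can be checked in isolation. A small sketch of what they do with output that follows the "--- FILE: ... ---" delimiters the prompts request; the import path and the sample text are illustrative assumptions:

```ts
// Sketch only: exercises the exported parsers from vibe.ts above.
// The sample mirrors the delimiter format the META_PROMPT asks the LLM to emit.
import { parseGeneratedFiles, extractAgentName } from './src/commands/vibe.js'; // assumed path

const llmOutput = [
  '--- FILE: INSTRUCTIONS.md ---',
  '---',
  'name: support-bot',
  'purpose: Answer order questions',
  '---',
  '## Role',
  'Customer support agent.',
  '--- FILE: IDENTITY.md ---',
  'Name: Support Bot',
].join('\n');

const files = parseGeneratedFiles(llmOutput);
// files => { 'INSTRUCTIONS.md': '---\nname: support-bot\n...', 'IDENTITY.md': 'Name: Support Bot' }

console.log(extractAgentName(files)); // "support-bot", taken from the frontmatter "name:" line
```

Each file body is captured lazily up to the next FILE delimiter (or end of input) and trimmed, and the agent name falls back to "new-agent" when no frontmatter name: line is present.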