@operor/cli 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62)
  1. package/README.md +76 -0
  2. package/dist/config-Bn2pbORi.js +34 -0
  3. package/dist/config-Bn2pbORi.js.map +1 -0
  4. package/dist/converse-C_PB7-JH.js +142 -0
  5. package/dist/converse-C_PB7-JH.js.map +1 -0
  6. package/dist/doctor-98gPl743.js +122 -0
  7. package/dist/doctor-98gPl743.js.map +1 -0
  8. package/dist/index.d.ts +1 -0
  9. package/dist/index.js +2268 -0
  10. package/dist/index.js.map +1 -0
  11. package/dist/llm-override-BIQl0V6H.js +445 -0
  12. package/dist/llm-override-BIQl0V6H.js.map +1 -0
  13. package/dist/reset-DT8SBgFS.js +87 -0
  14. package/dist/reset-DT8SBgFS.js.map +1 -0
  15. package/dist/simulate-BKv62GJc.js +144 -0
  16. package/dist/simulate-BKv62GJc.js.map +1 -0
  17. package/dist/status-D6LIZvQa.js +82 -0
  18. package/dist/status-D6LIZvQa.js.map +1 -0
  19. package/dist/test-DYjkxbtK.js +177 -0
  20. package/dist/test-DYjkxbtK.js.map +1 -0
  21. package/dist/test-suite-D8H_5uKs.js +209 -0
  22. package/dist/test-suite-D8H_5uKs.js.map +1 -0
  23. package/dist/utils-BuV4q7f6.js +11 -0
  24. package/dist/utils-BuV4q7f6.js.map +1 -0
  25. package/dist/vibe-Bl_js3Jo.js +395 -0
  26. package/dist/vibe-Bl_js3Jo.js.map +1 -0
  27. package/package.json +43 -0
  28. package/src/commands/analytics.ts +408 -0
  29. package/src/commands/chat.ts +310 -0
  30. package/src/commands/config.ts +34 -0
  31. package/src/commands/converse.ts +182 -0
  32. package/src/commands/doctor.ts +154 -0
  33. package/src/commands/history.ts +60 -0
  34. package/src/commands/init.ts +163 -0
  35. package/src/commands/kb.ts +429 -0
  36. package/src/commands/llm-override.ts +480 -0
  37. package/src/commands/reset.ts +72 -0
  38. package/src/commands/simulate.ts +187 -0
  39. package/src/commands/status.ts +112 -0
  40. package/src/commands/test-suite.ts +247 -0
  41. package/src/commands/test.ts +177 -0
  42. package/src/commands/vibe.ts +478 -0
  43. package/src/config.ts +127 -0
  44. package/src/index.ts +190 -0
  45. package/src/log-timestamps.ts +26 -0
  46. package/src/setup.ts +712 -0
  47. package/src/start.ts +573 -0
  48. package/src/utils.ts +6 -0
  49. package/templates/agents/_defaults/SOUL.md +20 -0
  50. package/templates/agents/_defaults/USER.md +16 -0
  51. package/templates/agents/customer-support/IDENTITY.md +6 -0
  52. package/templates/agents/customer-support/INSTRUCTIONS.md +79 -0
  53. package/templates/agents/customer-support/SOUL.md +26 -0
  54. package/templates/agents/faq-bot/IDENTITY.md +6 -0
  55. package/templates/agents/faq-bot/INSTRUCTIONS.md +53 -0
  56. package/templates/agents/faq-bot/SOUL.md +19 -0
  57. package/templates/agents/sales/IDENTITY.md +6 -0
  58. package/templates/agents/sales/INSTRUCTIONS.md +67 -0
  59. package/templates/agents/sales/SOUL.md +20 -0
  60. package/tsconfig.json +9 -0
  61. package/tsdown.config.ts +13 -0
  62. package/vitest.config.ts +8 -0
package/src/commands/llm-override.ts
@@ -0,0 +1,480 @@
+ /** Approximate cost per 1M tokens by model name (input / output). */
+ const MODEL_COST_PER_M: Record<string, { input: number; output: number }> = {
+   'gpt-5.2': { input: 1.25, output: 5 },
+   'gpt-5-mini': { input: 0.25, output: 2 },
+   'gpt-5-nano': { input: 0.05, output: 0.4 },
+   'gpt-4o': { input: 2.5, output: 10 },
+   'gpt-4o-mini': { input: 0.15, output: 0.6 },
+   'gpt-4-turbo': { input: 10, output: 30 },
+   'gpt-4': { input: 30, output: 60 },
+   'gpt-3.5-turbo': { input: 0.5, output: 1.5 },
+   'claude-sonnet-4-5-20250929': { input: 3, output: 15 },
+   'claude-opus-4-20250514': { input: 15, output: 75 },
+   'claude-haiku-3-5-20241022': { input: 0.8, output: 4 },
+   'gemini-2.0-flash': { input: 0.1, output: 0.4 },
+   'gemini-1.5-pro': { input: 1.25, output: 5 },
+ };
+
+ /** Estimate cost in USD from token counts and model name. Returns 0 if model unknown. */
+ export function estimateCost(promptTokens: number, completionTokens: number, model: string): number {
+   // Try exact match first, then prefix match (e.g. "gpt-4o-2024-08-06" → "gpt-4o")
+   const rates = MODEL_COST_PER_M[model]
+     || Object.entries(MODEL_COST_PER_M).find(([k]) => model.startsWith(k))?.[1];
+   if (!rates) return 0;
+   return (promptTokens * rates.input + completionTokens * rates.output) / 1_000_000;
+ }
+
+ /** Extract image media from an MCP tool result, returning a clean text summary for the LLM. */
+ function extractMediaFromToolResult(result: any): { cleanResult: any; media?: { buffer: Buffer; fileName: string; mimeType: string } } {
+   if (!result || !result.content || !Array.isArray(result.content)) {
+     return { cleanResult: result };
+   }
+
+   for (const item of result.content) {
+     // Standard MCP image content type
+     if (item.type === 'image' && item.data) {
+       const buffer = Buffer.from(item.data, 'base64');
+       const mimeType = item.mimeType || 'image/png';
+       const ext = mimeType.split('/')[1] || 'png';
+       const cleanContent = result.content
+         .filter((c: any) => c.type !== 'image')
+         .concat([{ type: 'text', text: `Screenshot captured successfully (${buffer.length} bytes)` }]);
+       return {
+         cleanResult: { ...result, content: cleanContent },
+         media: { buffer, fileName: `screenshot.${ext}`, mimeType },
+       };
+     }
+
+     // Crawl4AI format: screenshot embedded in a JSON text string
+     if (item.type === 'text' && typeof item.text === 'string') {
+       try {
+         const parsed = JSON.parse(item.text);
+         if (parsed.screenshot && typeof parsed.screenshot === 'string') {
+           // Base64 data (long strings)
+           if (parsed.screenshot.length > 1000) {
+             const buffer = Buffer.from(parsed.screenshot, 'base64');
+             const { screenshot: _, ...rest } = parsed;
+             rest.screenshot_captured = true;
+             rest.screenshot_size = buffer.length;
+             const cleanContent = result.content.map((c: any) =>
+               c === item ? { type: 'text', text: JSON.stringify(rest) } : c
+             );
+             return {
+               cleanResult: { ...result, content: cleanContent },
+               media: { buffer, fileName: 'screenshot.png', mimeType: 'image/png' },
+             };
+           }
+
+           // File path (short string matching image path pattern)
+           if (/^\/.*\.(png|jpe?g|webp|gif)$/i.test(parsed.screenshot)) {
+             try {
+               const fs = require('fs');
+               if (fs.existsSync(parsed.screenshot)) {
+                 const buffer = fs.readFileSync(parsed.screenshot);
+                 const ext = parsed.screenshot.split('.').pop() || 'png';
+                 const mimeType = `image/${ext === 'jpg' ? 'jpeg' : ext}`;
+                 const { screenshot: _, ...rest } = parsed;
+                 rest.screenshot_captured = true;
+                 rest.screenshot_size = buffer.length;
+                 const cleanContent = result.content.map((c: any) =>
+                   c === item ? { type: 'text', text: JSON.stringify(rest) } : c
+                 );
+                 return {
+                   cleanResult: { ...result, content: cleanContent },
+                   media: { buffer, fileName: `screenshot.${ext}`, mimeType },
+                 };
+               }
+             } catch {
+               // File not accessible, fall through
+             }
+           }
+         }
+       } catch {
+         // Not JSON, skip
+       }
+     }
+   }
+
+   return { cleanResult: result };
+ }
+
+ /**
+  * When a crawl4ai tool returns empty content, retry using fetchContent() from @operor/knowledge.
+  * This is the same path /learn-url uses — it calls the /crawl endpoint with full browser rendering,
+  * then falls back to Readability. Returns null if not applicable or if retry also fails.
+  */
+ export async function retryCrawlWithFetchContent(
+   toolName: string,
+   toolArgs: Record<string, any>,
+   toolResult: any,
+   deps?: { fetchContentFn?: (url: string, opts: any) => Promise<any> },
+ ): Promise<any | null> {
+   // Only retry for crawl4ai tools
+   if (!/^crawl4ai/i.test(toolName)) return null;
+
+   // Must have a url argument to retry
+   const url = toolArgs?.url;
+   if (!url) return null;
+
+   // Check if the result is empty
+   if (!isCrawlResultEmpty(toolResult)) return null;
+
+   console.log(`[Operor] 🔄 crawl4ai returned empty for ${url}, retrying with fetchContent...`);
+
+   try {
+     const fetchContentFn = deps?.fetchContentFn
+       ?? (await import('@operor/knowledge')).fetchContent;
+
+     const { title, content, isMarkdown } = await fetchContentFn(url, {});
+
+     if (!content || content.trim().length === 0) {
+       console.log(`[Operor] ⚠️ fetchContent also returned empty for ${url}`);
+       return null;
+     }
+
+     // Trim to ~8k chars to avoid blowing up the context window
+     const trimmed = content.length > 8000
+       ? content.slice(0, 8000) + '\n\n[Content truncated]'
+       : content;
+
+     console.log(`[Operor] ✅ fetchContent got ${content.length} chars for "${title || url}"`);
+
+     return {
+       content: [{ type: 'text', text: JSON.stringify({ title, content: trimmed, url, source: 'fetchContent-fallback' }) }],
+     };
+   } catch (err: any) {
+     console.warn(`[Operor] ⚠️ fetchContent fallback failed for ${url}:`, err.message);
+     return null;
+   }
+ }
+
+ function isCrawlResultEmpty(result: any): boolean {
+   if (!result) return true;
+   const content = result?.content;
+   if (!content || !Array.isArray(content) || content.length === 0) return true;
+   // Check if all text entries are empty
+   return content.every((c: any) => !c.text || c.text.trim().length === 0);
+ }
+
+ /**
+  * Shared LLM override for agent.process — used by simulate, converse, and start commands.
+  * Replaces the default pattern-matching with real LLM-powered responses + multi-turn tool calling.
+  */
+ export function applyLLMOverride(
+   agent: any,
+   llm: any,
+   allTools: any[],
+   options?: {
+     systemPrompt?: string;
+     kbRuntime?: any;
+     useKB?: boolean;
+     guardrails?: { systemRules?: string[]; blockedTopics?: string[]; escalationTriggers?: string[]; maxResponseLength?: number };
+     promptSkills?: Array<{name: string, content: string}>;
+   },
+ ): void {
+   // Log skills once at setup time
+   if (options?.promptSkills?.length) {
+     console.log(`[Operor] 📝 Prompt skills for ${agent.config.name}: ${options.promptSkills.map(s => s.name).join(', ')}`);
+   }
+   if (allTools.length > 0) {
+     console.log(`[Operor] 🔧 MCP tools for ${agent.config.name}: ${allTools.map((t: any) => t.name).join(', ')}`);
+   }
+
+   agent.process = async (context: any) => {
+     const startTime = Date.now();
+     if (options?.promptSkills?.length) {
+       console.log(`[Operor] 📝 Prompt skills (injected into system prompt): ${options.promptSkills.map(s => s.name).join(', ')}`);
+     }
+     if (allTools.length > 0) {
+       console.log(`[Operor] 🔧 MCP tools (${allTools.length} available): ${allTools.map((t: any) => t.name).join(', ')}`);
+     }
+     try {
+       // Build system message: use provided systemPrompt or fall back to personality/purpose
+       let systemMessage = options?.systemPrompt
+         ?? `You are a ${agent.config.personality} customer support agent. ${agent.config.purpose}.`;
+
+       // Inject response length guidance so the LLM naturally keeps responses concise
+       if (options?.guardrails?.maxResponseLength) {
+         const limit = options.guardrails.maxResponseLength;
+         systemMessage += `\n\n## Response Length\n\nKeep your responses concise and under ${limit} characters. `
+           + 'This is a messaging app — users prefer short, clear answers. '
+           + 'Use short paragraphs. Avoid long lists or verbose explanations. '
+           + 'If the answer requires more detail, provide the key points and offer to elaborate.';
+       }
+
+       // KB metadata for copilot tracking
+       let kbMetadata: Record<string, any> | undefined;
+
+       // Inject KB context if available.
+       // When useKB is provided, only retrieve when true; otherwise retrieve whenever kbRuntime exists.
+       const shouldRetrieveKB = options?.kbRuntime && (options?.useKB !== undefined ? options.useKB : true);
+       if (shouldRetrieveKB) {
+         try {
+           const kbStart = Date.now();
+           console.log('[Operor] 📚 KB: retrieving context for:', context.currentMessage.text.substring(0, 80));
+           const kbResult = await options!.kbRuntime.retrieve(context.currentMessage.text);
+           console.log(`[Operor] 📚 KB: retrieval completed in ${((Date.now() - kbStart) / 1000).toFixed(1)}s`);
+           const topResult = kbResult.results?.[0];
+           kbMetadata = {
+             kbTopScore: topResult?.score ?? 0,
+             kbIsFaqMatch: !!kbResult.isFaqMatch,
+             kbTopChunkContent: topResult?.chunk?.content,
+             kbResultCount: kbResult.results?.length ?? 0,
+             kbFaqMatchCount: kbResult.faqMatches?.length ?? 0,
+           };
+           const topScore = kbResult.results?.[0]?.score ?? 0;
+           if (kbResult.context && topScore >= 0.50) {
+             console.log(`[Operor] 📚 KB: found results, isFaqMatch=${kbResult.isFaqMatch}, topScore=${topScore.toFixed(3)}, faqAnswer=${kbResult.faqAnswer ?? '(undefined)'}`);
+             if (kbResult.isFaqMatch) {
+               // When we have multiple FAQ matches from compound query splitting,
+               // build a combined override with all answers
+               if (kbResult.faqMatches && kbResult.faqMatches.length > 1) {
+                 const faqList = kbResult.faqMatches
+                   .map((m: any, i: number) => `${i + 1}. **Q:** ${m.faqQuestion}\n **A:** ${m.faqAnswer}`)
+                   .join('\n');
+                 const faqOverride = '## CRITICAL OVERRIDE — Multiple FAQ Answers\n\n'
+                   + 'Multiple verified FAQ entries match the user\'s question.\n\n'
+                   + faqList + '\n\n'
+                   + 'RULES:\n'
+                   + '1. You MUST include ALL the above answers in your response.\n'
+                   + '2. Do NOT correct, rephrase, or "fix" the answers — the business owner wrote them intentionally.\n'
+                   + '3. Combine them naturally in a single response.\n'
+                   + '4. Do NOT substitute your own knowledge for the required answers.\n'
+                   + '5. If Skill Instructions below provide conflicting guidance, follow the Skill Instructions instead.\n\n---\n\n';
+                 systemMessage = faqOverride + systemMessage;
+               } else if (kbResult.faqAnswer) {
+                 const faqOverride = '## CRITICAL OVERRIDE — FAQ Answer\n\n'
+                   + 'A verified FAQ entry exactly matches the user\'s question.\n\n'
+                   + (kbResult.faqQuestion ? `**User asked:** ${kbResult.faqQuestion}\n` : '')
+                   + `**Required answer (verbatim):** ${kbResult.faqAnswer}\n\n`
+                   + 'RULES:\n'
+                   + '1. You MUST include the EXACT wording from the required answer above in your response.\n'
+                   + '2. Do NOT correct, rephrase, or "fix" the answer — even if it looks like a typo or unusual value. '
+                   + 'The business owner wrote this answer intentionally.\n'
+                   + '3. You may add a brief friendly sentence around it, but the core answer must appear word-for-word.\n'
+                   + '4. Do NOT substitute your own knowledge for the required answer.\n'
+                   + '5. If Skill Instructions below provide conflicting guidance, follow the Skill Instructions instead.\n\n---\n\n';
+                 systemMessage = faqOverride + systemMessage;
+               } else {
+                 // Fallback: parse answer from the Q/A content format
+                 const topChunk = kbResult.results?.[0]?.chunk?.content || '';
+                 const answerMatch = topChunk.match(/^A:\s*(.+)$/m);
+                 if (answerMatch) {
+                   const faqOverride = '## CRITICAL OVERRIDE — FAQ Answer\n\n'
+                     + `**Required answer:** ${answerMatch[1].trim()}\n\n`
+                     + 'You MUST respond with the above answer. Do NOT ignore it.\n'
+                     + 'If Skill Instructions below provide conflicting guidance, follow the Skill Instructions instead.\n\n---\n\n';
+                   systemMessage = faqOverride + systemMessage;
+                 } else {
+                   systemMessage += '\n\n## Knowledge Base Context (FAQ Match)\n\n'
+                     + 'The following FAQ answer was found that directly matches the user\'s question. '
+                     + 'You MUST use this answer as the basis of your response. Do not ignore it or make up a different answer.\n\n'
+                     + kbResult.context;
+                 }
+               }
+             } else {
+               // Not a high-confidence FAQ match, but check if the top result is still
+               // an FAQ document — if so, surface the answer more prominently than
+               // generic KB context so the LLM doesn't ignore it.
+               const topResult = kbResult.results?.[0];
+               const topIsFaq = topResult?.document?.sourceType === 'faq';
+               const faqAns = topIsFaq && (topResult.chunk?.metadata?.answer || topResult.document?.metadata?.answer);
+
+               if (faqAns) {
+                 const faqQ = topResult.chunk?.metadata?.question || topResult.document?.metadata?.question;
+                 const faqOverride = '## FAQ Reference\n\n'
+                   + 'A relevant FAQ entry was found in the knowledge base.\n\n'
+                   + (faqQ ? `**FAQ question:** ${faqQ}\n` : '')
+                   + `**FAQ answer:** ${faqAns}\n\n`
+                   + 'You SHOULD use this FAQ answer as the basis of your response unless the user is clearly asking about something different.\n\n---\n\n';
+                 systemMessage = faqOverride + systemMessage;
+               } else {
+                 systemMessage += '\n\n## Knowledge Base Context\n\n'
+                   + 'The following information was retrieved from the knowledge base. '
+                   + 'If it contains information relevant to the user\'s question, you MUST use it to answer. '
+                   + 'Do NOT say you don\'t have information if the answer is present below.\n\n'
+                   + kbResult.context;
+                 if (allTools.length > 0) {
+                   systemMessage += '\n\nNote: You also have tools available. If the KB context does not fully answer the user\'s question, use your tools to look up the information.';
+                 }
+               }
+             }
+           } else {
+             console.log('[Operor] 📚 KB: no relevant context found');
+             const hasTools = allTools.length > 0;
+             const noKbOverride = '## CRITICAL OVERRIDE — No Knowledge Base Match\n\n'
+               + 'The knowledge base was searched and contains NO relevant information for this query.\n\n'
+               + 'RULES:\n'
+               + '1. You MUST NOT make up, guess, or fabricate any information.\n'
+               + '2. You MUST NOT pretend to know the answer.\n'
+               + (hasTools
+                 ? '3. However, you HAVE tools available. If the user\'s request can be answered by calling a tool (e.g. looking up an order, searching products), you MUST call the appropriate tool instead of refusing.\n'
+                   + '4. Only say you don\'t have information if no tool can help either.\n'
+                   + '5. Keep your response short and honest.\n\n---\n\n'
+                 : '3. You MUST politely tell the customer you do not have information on this topic.\n'
+                   + '4. You MAY suggest they contact support or rephrase their question.\n'
+                   + '5. Keep your response short and honest.\n\n---\n\n');
+             systemMessage = noKbOverride + systemMessage;
+           }
+         } catch (kbError) {
+           console.warn('[Operor] ⚠️ KB retrieval failed:', (kbError as Error).message);
+         }
+       }
+
+       // If the user's message contains a URL and we have fetch tools, hint the LLM to use them
+       const userText = context.currentMessage.text;
+       const urlPattern = /https?:\/\/[^\s]+/i;
+       if (urlPattern.test(userText) && allTools.length > 0) {
+         const fetchToolNames = allTools
+           .filter((t: any) => /crawl|fetch|scrape|browse/i.test(t.name))
+           .map((t: any) => t.name);
+         if (fetchToolNames.length > 0) {
+           systemMessage += `\n\n## URL Detected\n\nThe user's message contains a URL. You MUST use one of your tools (${fetchToolNames.join(', ')}) to fetch the page content before responding. Do NOT say you cannot access URLs — you have tools that can fetch web pages.`;
+         }
+       }
+
+       // Inject prompt skill instructions LAST — highest priority position in the system prompt.
+       // This ensures skill directives override KB FAQ answers and all other context.
+       if (options?.promptSkills?.length) {
+         systemMessage += '\n\n## Skill Instructions (HIGHEST PRIORITY)\n\n'
+           + 'The following skill instructions are the HIGHEST PRIORITY directives for this agent.\n'
+           + 'They OVERRIDE any conflicting guidance above, including FAQ answers and Knowledge Base context.\n'
+           + 'You MUST follow these instructions even if they contradict KB or FAQ content.\n\n'
+           + options.promptSkills.map(s => `### ${s.name}\n\n${s.content}`).join('\n\n---\n\n');
+       }
+
+       const messages: Array<{ role: string; content: string }> = [
+         { role: 'system', content: systemMessage },
+         ...context.history.map((m: any) => ({
+           role: m.role as 'user' | 'assistant',
+           content: m.content,
+         })),
+         { role: 'user', content: context.currentMessage.text },
+       ];
+
+       const toolCalls: any[] = [];
+       let finalText = '';
+       let iterations = 0;
+       const maxIterations = 5;
+       const mediaAttachments: Array<{ buffer: Buffer; fileName: string; mimeType: string }> = [];
+
+       // Accumulate token usage across multi-turn tool calling loop
+       let totalPromptTokens = 0;
+       let totalCompletionTokens = 0;
+
+       while (iterations < maxIterations) {
+         iterations++;
+
+         const llmStart = Date.now();
+         console.log(`[Operor] 💭 LLM call #${iterations} (${allTools.length} tools available)...`);
+         const response = await llm.complete(messages, {
+           tools: allTools.map((t: any) => ({
+             name: t.name,
+             description: t.description,
+             parameters: t.parameters,
+           })),
+         });
+         console.log(`[Operor] 💭 LLM responded in ${((Date.now() - llmStart) / 1000).toFixed(1)}s${response.toolCalls?.length ? ` → ${response.toolCalls.length} tool call(s): ${response.toolCalls.map((tc: any) => tc.name).join(', ')}` : ' → text response'}`);
+
+         // Accumulate usage from each LLM call
+         if (response.usage) {
+           totalPromptTokens += response.usage.promptTokens || 0;
+           totalCompletionTokens += response.usage.completionTokens || 0;
+         }
+
+         if (response.toolCalls && response.toolCalls.length > 0) {
+           const executedTools: Array<{ name: string; result: any; success: boolean; error?: string }> = [];
+
+           for (const tc of response.toolCalls) {
+             const tool = allTools.find((t: any) => t.name === tc.name);
+             if (!tool) continue;
+             const toolStart = Date.now();
+             console.log(`[Operor] 🔧 Executing tool: ${tc.name}${tc.arguments?.url ? ` (${tc.arguments.url})` : ''}...`);
+             try {
+               const rawResult = await tool.execute(tc.arguments);
+               const { cleanResult, media } = extractMediaFromToolResult(rawResult);
+               if (media) {
+                 mediaAttachments.push(media);
+                 console.log(`[Operor] 🖼️ Extracted ${media.mimeType} (${media.buffer.length} bytes) from tool ${tc.name}`);
+               }
+
+               // If a crawl4ai tool returned empty content, retry with fetchContent (same path as /learn-url)
+               const fallbackResult = await retryCrawlWithFetchContent(tc.name, tc.arguments, cleanResult);
+               const finalResult = fallbackResult ?? cleanResult;
+
+               const duration = Date.now() - toolStart;
+               console.log(`[Operor] 🔧 Tool ${tc.name} completed in ${(duration / 1000).toFixed(1)}s${fallbackResult ? ' (via fetchContent fallback)' : ''}`);
+               toolCalls.push({ id: tc.id, name: tc.name, params: tc.arguments, result: finalResult, success: true, duration });
+               executedTools.push({ name: tc.name, result: finalResult, success: true });
+             } catch (err: any) {
+               const duration = Date.now() - toolStart;
+               console.log(`[Operor] 🔧 Tool ${tc.name} failed after ${(duration / 1000).toFixed(1)}s, attempting fallback...`);
+               const errorMsg = err.message || err.toString?.() || 'Unknown tool error';
+
+               // If a crawl4ai tool threw (e.g. empty result treated as error), try fetchContent fallback
+               const catchFallback = await retryCrawlWithFetchContent(tc.name, tc.arguments, null);
+               if (catchFallback) {
+                 toolCalls.push({ id: tc.id, name: tc.name, params: tc.arguments, result: catchFallback, success: true, duration });
+                 executedTools.push({ name: tc.name, result: catchFallback, success: true });
+               } else {
+                 toolCalls.push({ id: tc.id, name: tc.name, params: tc.arguments, result: null, success: false, error: errorMsg, duration });
+                 executedTools.push({ name: tc.name, result: null, success: false, error: errorMsg });
+               }
+             }
+           }
+
+           const toolResultSummary = executedTools.map(tc =>
+             `[Tool ${tc.name}]: ${JSON.stringify(tc.success ? tc.result : { error: tc.error })}`
+           ).join('\n');
+
+           messages.push(
+             { role: 'assistant', content: `I'll call ${executedTools.map(tc => tc.name).join(', ')} to help with that.` },
+             { role: 'user', content: `Tool results:\n${toolResultSummary}\n\nPlease use these results to respond to the customer.` }
+           );
+         } else {
+           finalText = response.text;
+           break;
+         }
+       }
+
+       const modelName = llm.getModelName?.() || llm.config?.model || '';
+       const cost = estimateCost(totalPromptTokens, totalCompletionTokens, modelName);
+
+       // Attach the first extracted media (e.g. screenshot) to the response
+       const media = mediaAttachments[0];
+
+       return {
+         text: finalText,
+         toolCalls,
+         duration: Date.now() - startTime,
+         cost: cost > 0 ? cost : undefined,
+         usage: { promptTokens: totalPromptTokens, completionTokens: totalCompletionTokens },
+         metadata: kbMetadata,
+         ...(media && {
+           mediaBuffer: media.buffer,
+           mediaFileName: media.fileName,
+           mediaMimeType: media.mimeType,
+         }),
+       };
+     } catch (processError: any) {
+       const msg = processError.message || '';
+       const code = processError.code || '';
+       let errorMessage = 'Sorry, something went wrong. Please try again.';
+
+       if (msg.includes('ECONNREFUSED') || msg.includes('ETIMEDOUT') || msg.includes('ENOTFOUND') ||
+           msg.includes('fetch failed') || code === 'ECONNREFUSED' || code === 'ETIMEDOUT' ||
+           code === 'UND_ERR_CONNECT_TIMEOUT') {
+         errorMessage = 'Cannot connect to LLM API. Check your network connection and LLM_API_KEY.';
+       } else if (msg.includes('401') || msg.includes('403') || msg.includes('Unauthorized') ||
+           msg.includes('Incorrect API key') || msg.includes('API key')) {
+         errorMessage = 'LLM API authentication failed. Check your LLM_API_KEY in .env';
+       } else if (msg.includes('429') || msg.includes('rate limit')) {
+         errorMessage = 'LLM API rate limit exceeded. Please try again later.';
+       } else if (msg) {
+         errorMessage = `LLM error: ${msg}`;
+       }
+
+       return { text: errorMessage, toolCalls: [], duration: Date.now() - startTime };
+     }
+   };
+ }
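
For orientation, here is a minimal usage sketch of the two main exports in this file, estimateCost and applyLLMOverride. It is not part of the package contents above: the agent and llm shapes, the stub values, and the import path are assumptions inferred from the code in this hunk, where agent carries a config plus an overridable process() and llm.complete(messages, { tools }) resolves to { text, toolCalls?, usage? }.

// Illustrative sketch only; import path and object shapes are assumptions.
import { applyLLMOverride, estimateCost } from './llm-override.js';

// Cost sanity check: 12,000 prompt tokens + 800 completion tokens on gpt-4o
// is (12000 * 2.5 + 800 * 10) / 1_000_000, roughly $0.038.
console.log(estimateCost(12_000, 800, 'gpt-4o'));

const agent: any = {
  config: { name: 'demo', personality: 'friendly', purpose: 'Answer questions about Acme Co' },
  process: async () => ({ text: 'pattern-matched reply' }), // default, replaced below
};

const llm: any = {
  config: { model: 'gpt-4o-mini' },
  // Stub client: always returns text and usage; a real client would call an LLM API.
  complete: async () => ({ text: 'Hello!', toolCalls: [], usage: { promptTokens: 50, completionTokens: 10 } }),
};

// No MCP tools and no KB runtime in this sketch, so the loop exits after one LLM call.
applyLLMOverride(agent, llm, [], {
  systemPrompt: 'You are a friendly support agent for Acme Co.',
  guardrails: { maxResponseLength: 600 },
});

const reply = await agent.process({ currentMessage: { text: 'Hi there' }, history: [] });
console.log(reply.text, reply.usage, reply.cost);
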
package/src/commands/reset.ts
@@ -0,0 +1,72 @@
+ import { existsSync, unlinkSync, rmSync, statSync } from 'node:fs';
+ import { resolve } from 'node:path';
+ import { createInterface } from 'node:readline';
+ import { readConfig } from '../config.js';
+
+ function fileSize(p: string): string {
+   try {
+     const s = statSync(p).size;
+     if (s < 1024) return `${s} B`;
+     if (s < 1024 * 1024) return `${(s / 1024).toFixed(1)} KB`;
+     return `${(s / (1024 * 1024)).toFixed(1)} MB`;
+   } catch { return '?'; }
+ }
+
+ export async function runReset(opts: { yes?: boolean; keepConfig?: boolean }) {
+   const config = readConfig();
+   const cwd = process.cwd();
+
+   const targets: { label: string; path: string; isDir?: boolean }[] = [
+     { label: 'Memory DB', path: resolve(cwd, config.MEMORY_DB_PATH || './operor.db') },
+     { label: 'Knowledge Base DB', path: resolve(cwd, config.KB_DB_PATH || './knowledge.db') },
+     { label: 'Copilot DB', path: resolve(cwd, config.COPILOT_DB_PATH || './copilot.db') },
+     { label: 'Analytics DB', path: resolve(cwd, config.ANALYTICS_DB_PATH || './analytics.db') },
+   ];
+
+   if (!opts.keepConfig) {
+     targets.push({ label: 'MCP skills config', path: resolve(cwd, 'mcp.json') });
+   }
+
+   targets.push({ label: 'Baileys auth state', path: resolve(cwd, 'baileys_auth'), isDir: true });
+
+   const found = targets.filter(t => existsSync(t.path));
+
+   if (found.length === 0) {
+     console.log('Nothing to reset — no data files found.');
+     return;
+   }
+
+   console.log('\nThe following will be deleted:\n');
+   for (const t of found) {
+     const size = t.isDir ? 'dir' : fileSize(t.path);
+     console.log(` ${t.label.padEnd(22)} ${t.path} (${size})`);
+   }
+   console.log('');
+
+   if (!opts.yes) {
+     const rl = createInterface({ input: process.stdin, output: process.stdout });
+     const answer = await new Promise<string>(r => rl.question('Type "yes" to confirm: ', r));
+     rl.close();
+     if (answer.trim().toLowerCase() !== 'yes') {
+       console.log('Aborted.');
+       return;
+     }
+   }
+
+   let deleted = 0;
+   for (const t of found) {
+     try {
+       if (t.isDir) {
+         rmSync(t.path, { recursive: true, force: true });
+       } else {
+         unlinkSync(t.path);
+       }
+       console.log(` Deleted: ${t.label}`);
+       deleted++;
+     } catch (err: any) {
+       console.error(` Failed: ${t.label} — ${err.message}`);
+     }
+   }
+
+   console.log(`\nReset complete. ${deleted}/${found.length} items deleted.`);
+ }
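
The reset command above deletes local databases, optionally mcp.json, and the Baileys auth directory after a confirmation prompt. Below is a hedged sketch of how a CLI entry point might expose runReset; the commander dependency, program name, and flag wiring are assumptions for illustration and are not taken from package/src/index.ts.

// Illustrative sketch only; the real command registration in src/index.ts may differ.
import { Command } from 'commander';
import { runReset } from './commands/reset.js';

const program = new Command('operor');

program
  .command('reset')
  .description('Delete local databases, MCP config, and Baileys auth state')
  .option('-y, --yes', 'skip the confirmation prompt')
  .option('--keep-config', 'keep mcp.json')
  // commander exposes --keep-config as opts.keepConfig
  .action((opts) => runReset({ yes: opts.yes, keepConfig: opts.keepConfig }));

program.parseAsync(process.argv);
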