@messenger-box/tailwind-ui-inbox 10.0.3-alpha.72 → 10.0.3-alpha.74
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +8 -0
- package/lib/components/AIAgent/AIAgent.d.ts +7 -0
- package/lib/components/AIAgent/AIAgent.d.ts.map +1 -1
- package/lib/components/AIAgent/AIAgent.js +362 -615
- package/lib/components/AIAgent/AIAgent.js.map +1 -1
- package/lib/components/InboxMessage/InputComponent.d.ts.map +1 -1
- package/lib/components/InboxMessage/InputComponent.js +143 -140
- package/lib/components/InboxMessage/InputComponent.js.map +1 -1
- package/lib/components/InboxMessage/RightSidebarAi.d.ts +23 -0
- package/lib/components/InboxMessage/RightSidebarAi.d.ts.map +1 -0
- package/lib/components/InboxMessage/RightSidebarAi.js +9 -0
- package/lib/components/InboxMessage/RightSidebarAi.js.map +1 -0
- package/lib/components/InboxMessage/index.d.ts +1 -0
- package/lib/components/InboxMessage/index.d.ts.map +1 -1
- package/lib/components/InboxMessage/message-widgets/ErrorFixCard.d.ts +11 -0
- package/lib/components/InboxMessage/message-widgets/ErrorFixCard.d.ts.map +1 -0
- package/lib/components/InboxMessage/message-widgets/ErrorFixCard.js +194 -0
- package/lib/components/InboxMessage/message-widgets/ErrorFixCard.js.map +1 -0
- package/lib/components/InboxMessage/message-widgets/ModernMessageGroup.d.ts +5 -1
- package/lib/components/InboxMessage/message-widgets/ModernMessageGroup.d.ts.map +1 -1
- package/lib/components/InboxMessage/message-widgets/ModernMessageGroup.js +308 -857
- package/lib/components/InboxMessage/message-widgets/ModernMessageGroup.js.map +1 -1
- package/lib/components/ModelConfigPanel.d.ts +12 -0
- package/lib/components/ModelConfigPanel.d.ts.map +1 -0
- package/lib/components/ModelConfigPanel.js +304 -0
- package/lib/components/ModelConfigPanel.js.map +1 -0
- package/lib/components/filler-components/RightSiderBar.d.ts +24 -0
- package/lib/components/filler-components/RightSiderBar.d.ts.map +1 -0
- package/lib/components/filler-components/RightSiderBar.js +335 -0
- package/lib/components/filler-components/RightSiderBar.js.map +1 -0
- package/lib/components/index.d.ts +4 -2
- package/lib/components/index.d.ts.map +1 -1
- package/lib/components/live-code-editor/hybrid-live-editor.d.ts +20 -0
- package/lib/components/live-code-editor/hybrid-live-editor.d.ts.map +1 -0
- package/lib/components/live-code-editor/hybrid-live-editor.js +68 -0
- package/lib/components/live-code-editor/hybrid-live-editor.js.map +1 -0
- package/lib/components/live-code-editor/index.d.ts +4 -0
- package/lib/components/live-code-editor/index.d.ts.map +1 -0
- package/lib/components/live-code-editor/live-code-editor.d.ts +14 -0
- package/lib/components/live-code-editor/live-code-editor.d.ts.map +1 -0
- package/lib/components/live-code-editor/live-code-editor.js +207 -0
- package/lib/components/live-code-editor/live-code-editor.js.map +1 -0
- package/lib/components/slot-fill/chat-message-filler.js +1 -1
- package/lib/components/slot-fill/chat-message-filler.js.map +1 -1
- package/lib/components/slot-fill/index.d.ts +1 -0
- package/lib/components/slot-fill/index.d.ts.map +1 -1
- package/lib/components/slot-fill/right-sidebar-filler.d.ts +4 -0
- package/lib/components/slot-fill/right-sidebar-filler.d.ts.map +1 -0
- package/lib/components/slot-fill/right-sidebar-filler.js +13 -0
- package/lib/components/slot-fill/right-sidebar-filler.js.map +1 -0
- package/lib/components/ui/button.d.ts +9 -0
- package/lib/components/ui/button.d.ts.map +1 -0
- package/lib/compute.js +1 -2
- package/lib/container/AiInbox.d.ts.map +1 -1
- package/lib/container/AiLandingInput.d.ts.map +1 -1
- package/lib/container/AiLandingInput.js +46 -119
- package/lib/container/AiLandingInput.js.map +1 -1
- package/lib/container/Inbox.js +1 -1
- package/lib/container/Inbox.js.map +1 -1
- package/lib/container/InboxAiMessagesLoader.d.ts +0 -21
- package/lib/container/InboxAiMessagesLoader.d.ts.map +1 -1
- package/lib/container/InboxAiMessagesLoader.js +18 -35
- package/lib/container/InboxAiMessagesLoader.js.map +1 -1
- package/lib/container/ServiceInbox.js +1 -1
- package/lib/container/ServiceInbox.js.map +1 -1
- package/lib/container/ThreadMessages.js +1 -1
- package/lib/container/ThreadMessages.js.map +1 -1
- package/lib/container/ThreadMessagesInbox.js +1 -1
- package/lib/container/ThreadMessagesInbox.js.map +1 -1
- package/lib/container/Threads.js +1 -1
- package/lib/container/Threads.js.map +1 -1
- package/lib/container/index.d.ts +5 -4
- package/lib/container/index.d.ts.map +1 -1
- package/lib/enums/messenger-slot-fill-name-enum.d.ts +2 -1
- package/lib/enums/messenger-slot-fill-name-enum.d.ts.map +1 -1
- package/lib/enums/messenger-slot-fill-name-enum.js +1 -0
- package/lib/enums/messenger-slot-fill-name-enum.js.map +1 -1
- package/lib/hooks/index.d.ts +3 -0
- package/lib/hooks/index.d.ts.map +1 -0
- package/lib/hooks/use-file-sync.d.ts +16 -0
- package/lib/hooks/use-file-sync.d.ts.map +1 -0
- package/lib/hooks/use-file-sync.js +63 -0
- package/lib/hooks/use-file-sync.js.map +1 -0
- package/lib/hooks/usePersistentModelConfig.d.ts +15 -0
- package/lib/hooks/usePersistentModelConfig.d.ts.map +1 -0
- package/lib/hooks/usePersistentModelConfig.js +46 -0
- package/lib/hooks/usePersistentModelConfig.js.map +1 -0
- package/lib/index.d.ts +5 -2
- package/lib/index.d.ts.map +1 -1
- package/lib/index.js +1 -1
- package/lib/machines/aiAgentMachine.d.ts.map +1 -1
- package/lib/machines/aiAgentMachine.js +64 -21
- package/lib/machines/aiAgentMachine.js.map +1 -1
- package/lib/machines/aiAgentMachine.simple.d.ts +3 -0
- package/lib/machines/aiAgentMachine.simple.d.ts.map +1 -0
- package/lib/machines/aiAgentMachine.simple.js +108 -0
- package/lib/machines/aiAgentMachine.simple.js.map +1 -0
- package/lib/machines/index.d.ts +3 -0
- package/lib/machines/index.d.ts.map +1 -0
- package/lib/module.d.ts +2 -1
- package/lib/module.d.ts.map +1 -1
- package/lib/module.js +11 -3
- package/lib/module.js.map +1 -1
- package/lib/routes.json +1 -2
- package/lib/templates/InboxWithAi.d.ts.map +1 -1
- package/lib/templates/InboxWithAi.js +129 -70
- package/lib/templates/InboxWithAi.js.map +1 -1
- package/lib/templates/InboxWithAi.tsx +151 -90
- package/lib/templates/index.d.ts +2 -0
- package/lib/templates/index.d.ts.map +1 -0
- package/lib/templates/index.ts +1 -0
- package/lib/utils/utils.d.ts +2 -0
- package/lib/utils/utils.d.ts.map +1 -0
- package/lib/utils/utils.js +3 -0
- package/lib/utils/utils.js.map +1 -0
- package/package.json +8 -5
- package/src/components/AIAgent/AIAgent.tsx +469 -731
- package/src/components/AIAgent/AIAgent.tsx.bk +1365 -0
- package/src/components/InboxMessage/InputComponent.tsx +2 -1
- package/src/components/InboxMessage/RightSidebarAi.tsx +37 -0
- package/src/components/InboxMessage/index.ts +1 -0
- package/src/components/InboxMessage/message-widgets/ErrorFixCard.tsx +240 -0
- package/src/components/InboxMessage/message-widgets/ModernMessageGroup.tsx +337 -1116
- package/src/components/ModelConfigPanel.tsx +334 -0
- package/src/components/filler-components/RightSiderBar.tsx +408 -0
- package/src/components/index.ts +4 -1
- package/src/components/live-code-editor/hybrid-live-editor.tsx +105 -0
- package/src/components/live-code-editor/index.ts +3 -0
- package/src/components/live-code-editor/live-code-editor.tsx +257 -0
- package/src/components/slot-fill/index.ts +1 -0
- package/src/components/slot-fill/right-sidebar-filler.tsx +39 -0
- package/src/components/ui/button.tsx +32 -0
- package/src/container/AiInbox.tsx +26 -3
- package/src/container/AiLandingInput.tsx +48 -22
- package/src/container/InboxAiMessagesLoader.tsx +17 -41
- package/src/container/index.ts +14 -6
- package/src/enums/messenger-slot-fill-name-enum.ts +1 -0
- package/src/hooks/index.ts +2 -0
- package/src/hooks/use-file-sync.ts +91 -0
- package/src/hooks/usePersistentModelConfig.ts +63 -0
- package/src/index.ts +19 -1
- package/src/machines/aiAgentMachine.simple.ts +89 -0
- package/src/machines/aiAgentMachine.ts +67 -19
- package/src/machines/aiAgentMachine.ts.bk +1296 -0
- package/src/machines/index.ts +2 -0
- package/src/module.tsx +10 -1
- package/src/templates/InboxWithAi.tsx +151 -90
- package/src/templates/index.ts +1 -0
- package/src/utils/utils.ts +3 -0
- package/lib/components/InboxMessage/MessageInputComponent.js +0 -173
- package/lib/components/InboxMessage/MessageInputComponent.js.map +0 -1
- package/lib/components/InboxMessage/MessagesBuilderUi.js +0 -162
- package/lib/components/InboxMessage/MessagesBuilderUi.js.map +0 -1
- package/lib/container/AiInbox.js +0 -1520
- package/lib/container/AiInbox.js.map +0 -1
- package/lib/container/AiInboxWithLoader.js +0 -300
- package/lib/container/AiInboxWithLoader.js.map +0 -1
- package/lib/container/InboxTemplate1.js +0 -1375
- package/lib/container/InboxTemplate1.js.map +0 -1
- package/lib/container/InboxTemplate2.js +0 -1426
- package/lib/container/InboxTemplate2.js.map +0 -1
|
@@ -0,0 +1,1296 @@
|
|
|
1
|
+
import { createMachine, assign, fromPromise } from 'xstate';
|
|
2
|
+
import { AIAgentContext, AIAgentEvent, Message, MCPData } from './types';
|
|
3
|
+
import { config } from '../config';
|
|
4
|
+
const { CLIENT_URL, NEWS_API_KEY } = config;
|
|
5
|
+
const env = {
|
|
6
|
+
NEWS_API_KEY: NEWS_API_KEY,
|
|
7
|
+
};
|
|
8
|
+
|
|
9
|
+
// API configuration - using environment variables from dev.env
|
|
10
|
+
// Read persisted model configuration saved by usePersistentModelConfig
|
|
11
|
+
function getUserModelConfig(): { provider?: 'openai' | 'anthropic' | 'gemini'; model?: string; apiKey?: string } {
|
|
12
|
+
try {
|
|
13
|
+
if (typeof window === 'undefined') return {};
|
|
14
|
+
const raw = window.localStorage.getItem('mbx:model-config');
|
|
15
|
+
if (!raw) return {};
|
|
16
|
+
const parsed = JSON.parse(raw);
|
|
17
|
+
return {
|
|
18
|
+
provider: parsed?.provider,
|
|
19
|
+
model: parsed?.model,
|
|
20
|
+
apiKey: parsed?.apiKey,
|
|
21
|
+
};
|
|
22
|
+
} catch {
|
|
23
|
+
return {};
|
|
24
|
+
}
|
|
25
|
+
}
|
|
26
|
+
|
|
27
|
+
const USER_MODEL_CONFIG = getUserModelConfig();
|
|
28
|
+
|
|
29
|
+
const API_CONFIG = {
|
|
30
|
+
// Only non-AI keys from env (public APIs allowed)
|
|
31
|
+
NEWS_API_KEY: env?.NEWS_API_KEY,
|
|
32
|
+
|
|
33
|
+
// Configurable proxy endpoints
|
|
34
|
+
ANTHROPIC_ENDPOINT: '/api/anthropic/messages',
|
|
35
|
+
OPENAI_ENDPOINT: '/api/openai',
|
|
36
|
+
|
|
37
|
+
// Direct MCP search APIs (these are public and don't have CORS issues)
|
|
38
|
+
NEWS_API_ENDPOINT: 'https://newsapi.org/v2/everything',
|
|
39
|
+
REDDIT_SEARCH_ENDPOINT: 'https://www.reddit.com/search.json',
|
|
40
|
+
REDDIT_TRENDING_ENDPOINT: 'https://www.reddit.com/r/news/hot.json',
|
|
41
|
+
DUCKDUCKGO_ENDPOINT: 'https://api.duckduckgo.com/',
|
|
42
|
+
|
|
43
|
+
// Model configuration
|
|
44
|
+
DEFAULT_MODEL: 'claude-3-5-haiku-20241022',
|
|
45
|
+
OPENAI_MODEL: 'gpt-4o-mini',
|
|
46
|
+
|
|
47
|
+
// User-provided model configuration (from localStorage)
|
|
48
|
+
get userProvider() {
|
|
49
|
+
return USER_MODEL_CONFIG?.provider;
|
|
50
|
+
},
|
|
51
|
+
get userModel() {
|
|
52
|
+
return USER_MODEL_CONFIG?.model;
|
|
53
|
+
},
|
|
54
|
+
get userApiKey() {
|
|
55
|
+
return USER_MODEL_CONFIG?.apiKey;
|
|
56
|
+
},
|
|
57
|
+
|
|
58
|
+
// Check which AI provider is available (ONLY from user config)
|
|
59
|
+
get availableProvider() {
|
|
60
|
+
if (this.userApiKey && this.userProvider) {
|
|
61
|
+
if (this.userProvider === 'anthropic') return 'anthropic';
|
|
62
|
+
if (this.userProvider === 'openai') return 'openai';
|
|
63
|
+
}
|
|
64
|
+
return null;
|
|
65
|
+
},
|
|
66
|
+
|
|
67
|
+
// Get the appropriate endpoint based on available provider
|
|
68
|
+
get primaryEndpoint() {
|
|
69
|
+
if (this.availableProvider === 'anthropic') return this.ANTHROPIC_ENDPOINT;
|
|
70
|
+
if (this.availableProvider === 'openai') return this.OPENAI_ENDPOINT;
|
|
71
|
+
return '';
|
|
72
|
+
},
|
|
73
|
+
|
|
74
|
+
// Get the appropriate model based on available provider
|
|
75
|
+
get primaryModel() {
|
|
76
|
+
if (this.userModel) return this.userModel;
|
|
77
|
+
if (this.availableProvider === 'anthropic') return this.DEFAULT_MODEL;
|
|
78
|
+
if (this.availableProvider === 'openai') return this.OPENAI_MODEL;
|
|
79
|
+
return this.DEFAULT_MODEL;
|
|
80
|
+
},
|
|
81
|
+
};
|
|
82
|
+
|
|
83
|
+
// Log configuration on startup
|
|
84
|
+
console.log('🤖 AI Agent Machine Configuration:');
|
|
85
|
+
console.log(` Available Provider (user): ${API_CONFIG.availableProvider || 'None'}`);
|
|
86
|
+
console.log(` Primary Endpoint: ${API_CONFIG.primaryEndpoint}`);
|
|
87
|
+
console.log(` Primary Model: ${API_CONFIG.primaryModel}`);
|
|
88
|
+
console.log(` Using Proxy for AI APIs: ✅ (avoids CORS issues)`);
|
|
89
|
+
console.log(` Using Direct APIs for MCP Search: ✅ (public APIs)`);
|
|
90
|
+
console.log(` User API Key: ${API_CONFIG.userApiKey ? '✅ Configured' : '❌ Not configured'}`);
|
|
91
|
+
console.log(` News API Key: ${API_CONFIG.NEWS_API_KEY ? '✅ Configured' : '❌ Not configured'}`);
|
|
92
|
+
|
|
93
|
+
// Test API configuration
|
|
94
|
+
if (!API_CONFIG.availableProvider) {
|
|
95
|
+
console.error('🚨 CRITICAL: No AI API provider configured!');
|
|
96
|
+
console.error('🚨 Please add your API key in Model Settings.');
|
|
97
|
+
} else {
|
|
98
|
+
console.log(`✅ AI API provider configured: ${API_CONFIG.availableProvider}`);
|
|
99
|
+
}
|
|
100
|
+
|
|
101
|
+
// Helper function to get the base URL for proxy calls
|
|
102
|
+
function getProxyBaseUrl(): string {
|
|
103
|
+
// Use Vite dev server proxy to avoid CORS issues
|
|
104
|
+
return CLIENT_URL || 'http://localhost:3011'; // Vite dev server
|
|
105
|
+
}
|
|
106
|
+
|
|
107
|
+
// Log the current proxy configuration
|
|
108
|
+
console.log(`🌐 Proxy Configuration: ${getProxyBaseUrl() || 'Relative paths (production)'}`);
|
|
109
|
+
|
|
110
|
+
// Helper function to make API calls through proxy (avoids CORS)
|
|
111
|
+
async function makeApiCall(endpoint: string, options: RequestInit) {
|
|
112
|
+
try {
|
|
113
|
+
const baseUrl = getProxyBaseUrl();
|
|
114
|
+
const url = endpoint ? `${baseUrl}${endpoint}` : '';
|
|
115
|
+
|
|
116
|
+
console.log(`🌐 Making API call through proxy to: ${url}`);
|
|
117
|
+
console.log(`📤 Request options:`, {
|
|
118
|
+
method: options.method,
|
|
119
|
+
headers: options.headers,
|
|
120
|
+
body: options.body ? JSON.parse(options.body as string) : undefined,
|
|
121
|
+
});
|
|
122
|
+
|
|
123
|
+
// Do not call if no endpoint (no provider configured)
|
|
124
|
+
if (!endpoint) {
|
|
125
|
+
throw new Error('AI provider is not configured. Please add your API key in Model Settings.');
|
|
126
|
+
}
|
|
127
|
+
|
|
128
|
+
// Inject API key headers from user config if available
|
|
129
|
+
const headers = new Headers(options.headers || {});
|
|
130
|
+
if (API_CONFIG.userApiKey) {
|
|
131
|
+
if (API_CONFIG.availableProvider === 'anthropic') {
|
|
132
|
+
headers.set('x-api-key', API_CONFIG.userApiKey);
|
|
133
|
+
headers.set('anthropic-version', '2023-06-01');
|
|
134
|
+
headers.set('anthropic-dangerous-direct-browser-access', 'true');
|
|
135
|
+
} else if (API_CONFIG.availableProvider === 'openai') {
|
|
136
|
+
headers.set('Authorization', `Bearer ${API_CONFIG.userApiKey}`);
|
|
137
|
+
}
|
|
138
|
+
}
|
|
139
|
+
|
|
140
|
+
const response = await fetch(url, { ...options, headers });
|
|
141
|
+
|
|
142
|
+
if (!response.ok) {
|
|
143
|
+
const errorData = await response.json().catch(() => ({}));
|
|
144
|
+
console.error('🚨 API Error:', response.status, errorData);
|
|
145
|
+
|
|
146
|
+
let errorMessage = 'API request failed';
|
|
147
|
+
if (errorData.error) {
|
|
148
|
+
if (typeof errorData.error === 'string') {
|
|
149
|
+
errorMessage = errorData.error;
|
|
150
|
+
} else if (errorData.error.message) {
|
|
151
|
+
errorMessage = errorData.error.message;
|
|
152
|
+
} else if (errorData.error.type) {
|
|
153
|
+
errorMessage = `${errorData.error.type}: ${errorData.error.message || 'Unknown error'}`;
|
|
154
|
+
}
|
|
155
|
+
}
|
|
156
|
+
|
|
157
|
+
throw new Error(errorMessage);
|
|
158
|
+
}
|
|
159
|
+
|
|
160
|
+
return response;
|
|
161
|
+
} catch (error) {
|
|
162
|
+
console.error('🚨 API call error:', error);
|
|
163
|
+
throw error;
|
|
164
|
+
}
|
|
165
|
+
}
|
|
166
|
+
|
|
167
|
+
// Helper function to fetch news from NewsAPI directly
|
|
168
|
+
async function fetchNewsFromNewsAPI(query: string) {
|
|
169
|
+
if (!API_CONFIG.NEWS_API_KEY) {
|
|
170
|
+
console.warn('⚠️ News API key not found, skipping NewsAPI');
|
|
171
|
+
return [];
|
|
172
|
+
}
|
|
173
|
+
|
|
174
|
+
try {
|
|
175
|
+
const response = await fetch(
|
|
176
|
+
`${API_CONFIG.NEWS_API_ENDPOINT}?q=${encodeURIComponent(query)}&apiKey=${
|
|
177
|
+
API_CONFIG.NEWS_API_KEY
|
|
178
|
+
}&language=en&sortBy=publishedAt&pageSize=5`,
|
|
179
|
+
);
|
|
180
|
+
|
|
181
|
+
if (!response.ok) {
|
|
182
|
+
console.error('NewsAPI error:', response.status);
|
|
183
|
+
return [];
|
|
184
|
+
}
|
|
185
|
+
|
|
186
|
+
const data = await response.json();
|
|
187
|
+
|
|
188
|
+
if (data.articles) {
|
|
189
|
+
return data.articles.map((article: any) => ({
|
|
190
|
+
title: article.title,
|
|
191
|
+
url: article.url,
|
|
192
|
+
description: article.description || article.title,
|
|
193
|
+
publishedAt: article.publishedAt,
|
|
194
|
+
source: { name: article.source.name },
|
|
195
|
+
}));
|
|
196
|
+
}
|
|
197
|
+
} catch (error) {
|
|
198
|
+
console.error('NewsAPI error:', error);
|
|
199
|
+
}
|
|
200
|
+
|
|
201
|
+
return [];
|
|
202
|
+
}
|
|
203
|
+
|
|
204
|
+
// Helper function to fetch from Reddit API directly
|
|
205
|
+
async function fetchAlternativeNews(query: string) {
|
|
206
|
+
try {
|
|
207
|
+
const response = await fetch(
|
|
208
|
+
`${API_CONFIG.REDDIT_SEARCH_ENDPOINT}?q=${encodeURIComponent(
|
|
209
|
+
query,
|
|
210
|
+
)}&sort=new&t=day&limit=3&restrict_sr=false&type=link`,
|
|
211
|
+
);
|
|
212
|
+
|
|
213
|
+
if (!response.ok) {
|
|
214
|
+
console.error('Reddit API error:', response.status);
|
|
215
|
+
return [];
|
|
216
|
+
}
|
|
217
|
+
|
|
218
|
+
const data = await response.json();
|
|
219
|
+
|
|
220
|
+
if (data?.data?.children) {
|
|
221
|
+
return data.data.children
|
|
222
|
+
.filter((post: any) => post.data.url && !post.data.is_self)
|
|
223
|
+
.slice(0, 3)
|
|
224
|
+
.map((post: any) => ({
|
|
225
|
+
title: post.data.title,
|
|
226
|
+
url: post.data.url,
|
|
227
|
+
description: `Discussion: ${post.data.title} (${post.data.score} upvotes)`,
|
|
228
|
+
publishedAt: new Date(post.data.created_utc * 1000).toISOString(),
|
|
229
|
+
source: { name: `r/${post.data.subreddit}` },
|
|
230
|
+
}));
|
|
231
|
+
}
|
|
232
|
+
} catch (error) {
|
|
233
|
+
console.error('Reddit API error:', error);
|
|
234
|
+
}
|
|
235
|
+
|
|
236
|
+
return [];
|
|
237
|
+
}
|
|
238
|
+
|
|
239
|
+
// Helper function to search DuckDuckGo directly
|
|
240
|
+
async function searchDuckDuckGo(query: string) {
|
|
241
|
+
try {
|
|
242
|
+
const response = await fetch(
|
|
243
|
+
`${API_CONFIG.DUCKDUCKGO_ENDPOINT}?q=${encodeURIComponent(query)}&format=json&no_html=1&skip_disambig=1`,
|
|
244
|
+
);
|
|
245
|
+
|
|
246
|
+
if (!response.ok) {
|
|
247
|
+
console.error('DuckDuckGo API error:', response.status);
|
|
248
|
+
return [];
|
|
249
|
+
}
|
|
250
|
+
|
|
251
|
+
const data = await response.json();
|
|
252
|
+
const results: any[] = [];
|
|
253
|
+
|
|
254
|
+
// Add abstract if available
|
|
255
|
+
if (data.Abstract) {
|
|
256
|
+
results.push({
|
|
257
|
+
title: data.Heading || `About ${query}`,
|
|
258
|
+
url: data.AbstractURL || `https://duckduckgo.com/?q=${encodeURIComponent(query)}`,
|
|
259
|
+
snippet: data.Abstract,
|
|
260
|
+
source: 'DuckDuckGo',
|
|
261
|
+
});
|
|
262
|
+
}
|
|
263
|
+
|
|
264
|
+
// Add related topics
|
|
265
|
+
if (data.RelatedTopics && data.RelatedTopics.length > 0) {
|
|
266
|
+
data.RelatedTopics.slice(0, 3).forEach((topic: any) => {
|
|
267
|
+
if (topic.Text && topic.FirstURL) {
|
|
268
|
+
results.push({
|
|
269
|
+
title: topic.Text.split(' - ')[0] || topic.Text.substring(0, 60),
|
|
270
|
+
url: topic.FirstURL,
|
|
271
|
+
snippet: topic.Text,
|
|
272
|
+
source: 'DuckDuckGo',
|
|
273
|
+
});
|
|
274
|
+
}
|
|
275
|
+
});
|
|
276
|
+
}
|
|
277
|
+
|
|
278
|
+
return results;
|
|
279
|
+
} catch (error) {
|
|
280
|
+
console.error('DuckDuckGo API error:', error);
|
|
281
|
+
return [];
|
|
282
|
+
}
|
|
283
|
+
}
|
|
284
|
+
|
|
285
|
+
// Helper function to get trending news from Reddit directly
|
|
286
|
+
async function getTrendingNews() {
|
|
287
|
+
try {
|
|
288
|
+
const response = await fetch(`${API_CONFIG.REDDIT_TRENDING_ENDPOINT}?limit=5`);
|
|
289
|
+
|
|
290
|
+
if (!response.ok) {
|
|
291
|
+
console.error('Reddit trending error:', response.status);
|
|
292
|
+
return [];
|
|
293
|
+
}
|
|
294
|
+
|
|
295
|
+
const data = await response.json();
|
|
296
|
+
|
|
297
|
+
if (data?.data?.children) {
|
|
298
|
+
return data.data.children
|
|
299
|
+
.filter((post: any) => post.data.url && !post.data.is_self)
|
|
300
|
+
.map((post: any) => ({
|
|
301
|
+
title: post.data.title,
|
|
302
|
+
url: post.data.url,
|
|
303
|
+
description: `Trending: ${post.data.title} (${post.data.score} upvotes, ${post.data.num_comments} comments)`,
|
|
304
|
+
publishedAt: new Date(post.data.created_utc * 1000).toISOString(),
|
|
305
|
+
source: { name: 'r/news' },
|
|
306
|
+
}));
|
|
307
|
+
}
|
|
308
|
+
} catch (error) {
|
|
309
|
+
console.error('Reddit trending error:', error);
|
|
310
|
+
}
|
|
311
|
+
|
|
312
|
+
return [];
|
|
313
|
+
}
|
|
314
|
+
|
|
315
|
+
// Direct MCP search function - no more server dependency
|
|
316
|
+
async function performDirectMCPSearch(query: string, includeNews: boolean = true, includeWeb: boolean = true) {
|
|
317
|
+
try {
|
|
318
|
+
console.log(`🔍 Performing direct MCP search for: "${query}" (news: ${includeNews}, web: ${includeWeb})`);
|
|
319
|
+
|
|
320
|
+
const promises = [];
|
|
321
|
+
|
|
322
|
+
// Fetch news if requested
|
|
323
|
+
if (includeNews) {
|
|
324
|
+
// Try multiple news sources
|
|
325
|
+
promises.push(fetchNewsFromNewsAPI(query), fetchAlternativeNews(query));
|
|
326
|
+
|
|
327
|
+
// For US news specifically, get trending
|
|
328
|
+
if (query.toLowerCase().includes('us news') || query.toLowerCase().includes('american news')) {
|
|
329
|
+
promises.push(getTrendingNews());
|
|
330
|
+
}
|
|
331
|
+
}
|
|
332
|
+
|
|
333
|
+
// Fetch web search if requested
|
|
334
|
+
if (includeWeb) {
|
|
335
|
+
promises.push(searchDuckDuckGo(query));
|
|
336
|
+
}
|
|
337
|
+
|
|
338
|
+
// Execute all searches concurrently
|
|
339
|
+
const results = await Promise.allSettled(promises);
|
|
340
|
+
|
|
341
|
+
// Combine all successful results
|
|
342
|
+
let searchResults: any[] = [];
|
|
343
|
+
let newsArticles: any[] = [];
|
|
344
|
+
|
|
345
|
+
results.forEach((result, index) => {
|
|
346
|
+
if (result.status === 'fulfilled' && Array.isArray(result.value)) {
|
|
347
|
+
if (includeWeb && index === results.length - 1) {
|
|
348
|
+
// Last promise is web search
|
|
349
|
+
searchResults = result.value;
|
|
350
|
+
} else if (includeNews) {
|
|
351
|
+
// Other promises are news sources
|
|
352
|
+
newsArticles = [...newsArticles, ...result.value];
|
|
353
|
+
}
|
|
354
|
+
}
|
|
355
|
+
});
|
|
356
|
+
|
|
357
|
+
// Remove duplicates and limit results
|
|
358
|
+
newsArticles = newsArticles
|
|
359
|
+
.filter((article, index, self) => index === self.findIndex((a: any) => a.title === article.title))
|
|
360
|
+
.slice(0, 8);
|
|
361
|
+
|
|
362
|
+
searchResults = searchResults.slice(0, 5);
|
|
363
|
+
|
|
364
|
+
const response = {
|
|
365
|
+
searchResults: includeWeb ? searchResults : [],
|
|
366
|
+
newsArticles: includeNews ? newsArticles : [],
|
|
367
|
+
summary: `Found ${searchResults.length} web results and ${newsArticles.length} news articles for "${query}"`,
|
|
368
|
+
sources: [
|
|
369
|
+
...(includeWeb ? ['DuckDuckGo'] : []),
|
|
370
|
+
...(includeNews ? ['NewsAPI', 'Reddit', 'Alternative Sources'] : []),
|
|
371
|
+
].filter(Boolean),
|
|
372
|
+
searchTime: Date.now(),
|
|
373
|
+
realData: true, // Flag to indicate this is real data
|
|
374
|
+
};
|
|
375
|
+
|
|
376
|
+
console.log(
|
|
377
|
+
`✅ Direct MCP search completed: ${response.searchResults.length} web + ${response.newsArticles.length} news results`,
|
|
378
|
+
);
|
|
379
|
+
|
|
380
|
+
return response;
|
|
381
|
+
} catch (error) {
|
|
382
|
+
console.error('🚨 Direct MCP search error:', error);
|
|
383
|
+
throw error;
|
|
384
|
+
}
|
|
385
|
+
}
|
|
386
|
+
|
|
387
|
+
// Helper function to detect if a query needs multi-message response
|
|
388
|
+
function needsMultiMessageResponse(query: string): boolean {
|
|
389
|
+
const multiMessageKeywords = [
|
|
390
|
+
'explain step by step',
|
|
391
|
+
'break down',
|
|
392
|
+
'tutorial',
|
|
393
|
+
'guide',
|
|
394
|
+
'how to',
|
|
395
|
+
'teach me',
|
|
396
|
+
'walk me through',
|
|
397
|
+
'step-by-step',
|
|
398
|
+
'detailed explanation',
|
|
399
|
+
'in detail',
|
|
400
|
+
'comprehensive',
|
|
401
|
+
'thorough',
|
|
402
|
+
'complete guide',
|
|
403
|
+
'coding',
|
|
404
|
+
'programming',
|
|
405
|
+
'development',
|
|
406
|
+
'build',
|
|
407
|
+
'create',
|
|
408
|
+
'multiple',
|
|
409
|
+
'several',
|
|
410
|
+
'various',
|
|
411
|
+
'different ways',
|
|
412
|
+
];
|
|
413
|
+
|
|
414
|
+
const lowerQuery = query.toLowerCase();
|
|
415
|
+
return multiMessageKeywords.some((keyword) => lowerQuery.includes(keyword)) || query.length > 200; // Long queries often benefit from multi-message responses
|
|
416
|
+
}
|
|
417
|
+
|
|
418
|
+
// Helper function to detect if a query needs real-time data
|
|
419
|
+
function needsRealTimeData(query: string): boolean {
|
|
420
|
+
const realTimeKeywords = [
|
|
421
|
+
'latest',
|
|
422
|
+
'recent',
|
|
423
|
+
'news',
|
|
424
|
+
'current',
|
|
425
|
+
'today',
|
|
426
|
+
'yesterday',
|
|
427
|
+
'breaking',
|
|
428
|
+
'update',
|
|
429
|
+
'now',
|
|
430
|
+
'price',
|
|
431
|
+
'stock',
|
|
432
|
+
'weather',
|
|
433
|
+
'trending',
|
|
434
|
+
'popular',
|
|
435
|
+
'search',
|
|
436
|
+
"what's happening",
|
|
437
|
+
'tell me about',
|
|
438
|
+
'find',
|
|
439
|
+
'look up',
|
|
440
|
+
'research',
|
|
441
|
+
];
|
|
442
|
+
|
|
443
|
+
const lowerQuery = query.toLowerCase();
|
|
444
|
+
return realTimeKeywords.some((keyword) => lowerQuery.includes(keyword));
|
|
445
|
+
}
|
|
446
|
+
|
|
447
|
+
// Helper function to format MCP data for the agent
|
|
448
|
+
function formatMCPDataForAgent(mcpData: MCPData): string {
|
|
449
|
+
let context = '';
|
|
450
|
+
|
|
451
|
+
if (mcpData.searchResults && mcpData.searchResults.length > 0) {
|
|
452
|
+
context += 'Current web search results:\n';
|
|
453
|
+
mcpData.searchResults.forEach((result, i) => {
|
|
454
|
+
context += `${i + 1}. **${result.title}**\n ${result.snippet}\n Source: ${result.url}\n\n`;
|
|
455
|
+
});
|
|
456
|
+
}
|
|
457
|
+
|
|
458
|
+
if (mcpData.newsArticles && mcpData.newsArticles.length > 0) {
|
|
459
|
+
context += 'Recent news articles:\n';
|
|
460
|
+
mcpData.newsArticles.forEach((article, i) => {
|
|
461
|
+
context += `${i + 1}. **${article.title}**\n ${article.description}\n Published: ${new Date(
|
|
462
|
+
article.publishedAt,
|
|
463
|
+
).toLocaleDateString()}\n Source: ${article.source.name}\n\n`;
|
|
464
|
+
});
|
|
465
|
+
}
|
|
466
|
+
|
|
467
|
+
if (context) {
|
|
468
|
+
context =
|
|
469
|
+
'Here is the current information I have access to:\n\n' +
|
|
470
|
+
context +
|
|
471
|
+
'\nPlease use this information to provide an accurate, up-to-date response.';
|
|
472
|
+
}
|
|
473
|
+
|
|
474
|
+
return context;
|
|
475
|
+
}
|
|
476
|
+
|
|
477
|
+
// Helper function to get the appropriate system prompt based on provider
|
|
478
|
+
function getSystemPrompt(provider: string, includeMCP: boolean = false): string {
|
|
479
|
+
const basePrompt = includeMCP
|
|
480
|
+
? 'You are a helpful AI assistant that can access real-time information through web search and news APIs. When provided with current information, use it to give accurate, up-to-date responses. Always cite your sources when referencing specific information from search results or news articles. You excel at providing comprehensive analysis and insights based on the latest data available.'
|
|
481
|
+
: 'You are a helpful AI assistant that provides detailed, step-by-step responses. Focus on the specific aspect requested and provide practical, actionable information.';
|
|
482
|
+
|
|
483
|
+
if (provider === 'anthropic') {
|
|
484
|
+
return basePrompt;
|
|
485
|
+
} else if (provider === 'openai') {
|
|
486
|
+
return basePrompt;
|
|
487
|
+
}
|
|
488
|
+
|
|
489
|
+
return basePrompt;
|
|
490
|
+
}
|
|
491
|
+
|
|
492
|
+
// Helper function to get conversation context from regular messages
|
|
493
|
+
function getConversationContext(regularMessages: any[]): string {
|
|
494
|
+
if (!regularMessages || regularMessages.length === 0) return '';
|
|
495
|
+
|
|
496
|
+
// Get the last few messages for context
|
|
497
|
+
const recentMessages = regularMessages.slice(-5);
|
|
498
|
+
let context = 'Recent conversation context:\n';
|
|
499
|
+
|
|
500
|
+
recentMessages.forEach((msg, index) => {
|
|
501
|
+
const author = msg.author?.username || msg.author?.givenName || 'User';
|
|
502
|
+
const content = msg.message?.substring(0, 200) || ''; // Limit content length
|
|
503
|
+
if (content.trim()) {
|
|
504
|
+
context += `${index + 1}. ${author}: ${content}\n`;
|
|
505
|
+
}
|
|
506
|
+
});
|
|
507
|
+
|
|
508
|
+
return context + '\n';
|
|
509
|
+
}
|
|
510
|
+
|
|
511
|
+
// aiAgentMachine — XState v5 state machine driving the AI-agent chat flow.
//
// High-level flow (as implemented below):
//   idle ──SEND_MESSAGE/FORCE_MULTI_MESSAGE──▶ analyzing
//   analyzing ──▶ planningMultiMessage ▶ multiProcessing ⇄ waitingBetweenMessages
//             ──▶ fetchingData ▶ processing
//             ──▶ processing ──▶ idle, with a shared `error` state for failures.
//
// The first argument is the machine config (states/transitions); the second
// provides the named guards, actions, and promise actors that the config
// references by string.
export const aiAgentMachine = createMachine(
  {
    /** @xstate-layout N4IgpgJg5mDOIC5QEMCWBBGA7ALgOlQgBswBiAZQFEA5AEQH0BZS889AcUoG0AGAXUSgADgHtYqHKhFZBIAB6IA7ACZleAGwBWdYoAsmngA4AzAE5Fi4wBoQAT0S6AjKbzHHy06aO7Du3eoBfAJs0TDBcAmIyAElqAAUAVQAVegBhAAl0ak5eASQQUXFJaVkFBE1dF0dvXx49Q19NG3sEXUUXHz9lRydFA2dNIJCMbHxCElJUgBlKdAAleko5uYB5OdzZQokpGXyyzU1jPFN9ZXVdHmN9dUNmxGNfPB7NRUNnR0N1ZX0hkFDRvDILDIIi2ABeqCwUFIG3yW2Ku1AZXcykUGlMhk0hh4PEcenU5zurV0R16yiupkODV+-3C+CBIPBkOhXEceWEYm2JT2iBRaPUGKxOLx-kJdkQ33UeA8ZnUxiuulRuhpIzpeAAZmAcABjAAWzNoyBwyFIEGkYAIWAAbiIANYWzU63WMVJxQ3G2Ecoo7Uq87FS8lnE6KardInGc54TTKF48bQVE4VQbBP6qiKOvUGo0msAAJ1zIlzeCERCN6sLAFsNVq9S63dnPQVOQjfQgPjwA8Yg21Q44idc8J8LspcapHMYeMqU7SIkIC9q4OIoabzZabfa8Ng80awHM4KIsLAwI34T6eW3-dKuwKezww+LynLjoZVDxPKiu20VWFZ-PF8zSDzAsixLMtK03cJtxwXd92kI8T2bM8kT9Ooo0sN9HEwrpLiJN4nkMSxTEwgjFHUONFG-AEgMLUg5koJI5gATQQ71uWQhByU0KMfDOao3zfZRbgfFQpVqaNtAJTRiMotVqNzCgaAYZhWA4bh+E2RC2PkCVjC4ioX3UPivFMQSiW+HhjlI-xugFD4Xgo6c03wOTSFiRIUgyLIcnUuFNMRbTWmUIlDN0fDsUxPofCMYwZIiFzplmBYllWdYfK9Ll-LKTjuIMoyBKEloEw0KS3zImUzAclMsBECA4FkGccA01jMsQABadQiVarjPB63revaWKxiiJqMtbTQTjwfijH4vxxtMIkXhcF8JzIoi5XaGLHJ-elgVBCEoRGltz2qJxpUcc5elIyk5X7S5Bx0HE4yIypLEG6snSzY1DqQgKehfPBh3ld9cTjawHycQxJq+Ui3wjEy8U24ZtuLP9YCXKBvq0rK4wsr5DhDLwe10fsYyeO930Mkw9HMN65MxlqEEeO9qbleVun8RQiRDULox0GMYwjTFAiCAIgA */
    id: 'aiAgent',
    // XState v5 typing idiom: bind context/event types to the machine.
    types: {} as {
      context: AIAgentContext;
      events: AIAgentEvent;
    },
    initial: 'idle',
    // Initial extended state (context) for a fresh agent session.
    context: {
      messages: [],
      currentInput: '',
      error: null,
      isTyping: false,
      multiMessageMode: false,
      currentMultiStep: 0,
      totalMultiSteps: 0,
      multiMessagePlan: [],
      mcpData: undefined,
      regularMessages: [], // Add regular messages from query
    },
    states: {
      // Waiting for user input or external triggers.
      idle: {
        on: {
          SEND_MESSAGE: {
            target: 'analyzing',
            guard: 'hasValidMessage',
            actions: 'addUserMessage',
          },
          // Same as SEND_MESSAGE, but the analyzing state will always take the
          // multi-message path (see shouldUseMultiMessage guard).
          FORCE_MULTI_MESSAGE: {
            target: 'analyzing',
            guard: 'hasValidMessage',
            actions: 'addUserMessage',
          },
          // Auto-respond to an incoming message without adding a duplicate
          // user message; goes straight to processing.
          AUTO_RESPOND_TO_MESSAGE: {
            target: 'processing',
            guard: 'hasValidMessage',
            actions: 'addAutoResponseMessage',
          },
          INPUT_CHANGE: {
            actions: 'updateInput',
          },
          CLEAR_ERROR: {
            actions: 'clearError',
          },
          UPDATE_REGULAR_MESSAGES: {
            actions: 'updateRegularMessages',
          },
          // Kick off analysis of the existing chat history (no new user text).
          ANALYZE_EXISTING_CONTEXT: {
            target: 'analyzing',
            guard: 'hasExistingContext',
            actions: 'addContextAnalysisMessage',
          },
          CONTINUE_PROCESSING: {
            // Just stay in idle state to trigger the useEffect again
            actions: 'logContinueProcessing',
          },
        },
      },
      // Transient routing state: decide which response strategy to use.
      analyzing: {
        entry: 'setTyping',
        always: [
          {
            target: 'planningMultiMessage',
            guard: 'shouldUseMultiMessage',
          },
          {
            target: 'fetchingData',
            guard: 'queryNeedsRealTimeData',
          },
          {
            target: 'processing',
          },
        ],
      },
      // Ask the model for a 3-5 step plan before generating a multi-part reply.
      planningMultiMessage: {
        entry: 'setMultiMessageMode',
        invoke: {
          id: 'generateMultiMessagePlan',
          src: 'generateMultiMessagePlan',
          input: ({ context }) => ({
            query: context.messages[context.messages.length - 1]?.content || '',
            regularMessages: context.regularMessages,
          }),
          onDone: {
            target: 'multiProcessing',
            actions: 'setMultiMessagePlan',
          },
          onError: {
            target: 'processing', // Fall back to single message
          },
        },
      },
      // Generate one step of the multi-message plan per visit to this state.
      multiProcessing: {
        invoke: {
          id: 'generateMultiMessage',
          src: 'generateMultiMessage',
          input: ({ context }) => ({
            messages: context.messages,
            plan: context.multiMessagePlan,
            currentStep: context.currentMultiStep,
            mcpData: context.mcpData,
            regularMessages: context.regularMessages,
          }),
          onDone: [
            {
              target: 'waitingBetweenMessages',
              guard: 'hasMoreMultiMessages',
              actions: ['addAIMessage', 'incrementMultiStep'],
            },
            {
              target: 'idle',
              actions: ['addAIMessage', 'resetMultiMessage'],
            },
          ],
          onError: {
            target: 'error',
            actions: 'setError',
          },
        },
      },
      // Brief pause (1.5s) between multi-message steps for a natural cadence.
      waitingBetweenMessages: {
        entry: 'setTyping',
        after: {
          1500: 'multiProcessing',
        },
      },
      // Fetch real-time web/news (MCP) data; errors fall through to processing.
      fetchingData: {
        invoke: {
          id: 'fetchMCPData',
          src: 'fetchMCPData',
          input: ({ context }) => ({
            query: context.messages[context.messages.length - 1]?.content || '',
          }),
          onDone: {
            target: 'processing',
            actions: 'setMCPData',
          },
          onError: {
            target: 'processing',
          },
        },
      },
      // Generate a single AI response (normal or auto-respond path).
      processing: {
        entry: () => {
          console.log('🤖 Entering processing state');
        },
        invoke: {
          id: 'generateResponse',
          src: 'generateAIResponse',
          input: ({ context }) => {
            console.log('🤖 generateAIResponse input:', {
              messagesCount: context.messages.length,
              hasMessageToRespondTo: !!context.messageToRespondTo,
              messageToRespondTo: context.messageToRespondTo,
              regularMessagesCount: context.regularMessages?.length || 0,
            });
            return {
              messages: context.messages,
              mcpData: context.mcpData,
              regularMessages: context.regularMessages,
              messageToRespondTo: context.messageToRespondTo,
            };
          },
          onDone: {
            target: 'idle',
            actions: [
              // Debug-only assign: logs the done event and returns context
              // unchanged before addAIMessage consumes the actor output.
              assign(({ context, event }) => {
                console.log('🤖 🎯 onDone transition triggered with event:', event);
                console.log('🤖 🎯 Event type:', typeof event);
                console.log('🤖 🎯 Event content:', event);
                return context;
              }),
              'addAIMessage',
            ],
          },
          onError: {
            target: 'error',
            actions: 'setError',
          },
        },
      },
      // Recoverable error state; user may retry, resend, or clear the error.
      error: {
        on: {
          RETRY: {
            target: 'processing',
          },
          SEND_MESSAGE: {
            target: 'analyzing',
            guard: 'hasValidMessage',
            actions: 'addUserMessage',
          },
          INPUT_CHANGE: {
            actions: 'updateInput',
          },
          CLEAR_ERROR: {
            target: 'idle',
            actions: 'clearError',
          },
          UPDATE_REGULAR_MESSAGES: {
            actions: 'updateRegularMessages',
          },
        },
      },
    },
  },
  {
    // Named guards referenced by string in the config above.
    guards: {
      // True when the triggering event carries a non-blank message string.
      hasValidMessage: ({ event }) => {
        console.log('🔍 hasValidMessage guard called with event:', event);

        if (event.type === 'AUTO_RESPOND_TO_MESSAGE') {
          const isValid = event.message?.trim().length > 0;
          console.log('🔍 AUTO_RESPOND_TO_MESSAGE guard result:', isValid, 'message:', event.message);
          return isValid;
        }

        if (event.type === 'SEND_MESSAGE' || event.type === 'FORCE_MULTI_MESSAGE') {
          const isValid = event.message?.trim().length > 0;
          console.log(
            '🔍 SEND_MESSAGE/FORCE_MULTI_MESSAGE guard result:',
            isValid,
            'message:',
            event.message,
          );
          return isValid;
        }

        console.log('🔍 Unknown event type in hasValidMessage guard:', event.type);
        return false;
      },
      // FORCE_MULTI_MESSAGE always takes the multi path; otherwise delegate to
      // the needsMultiMessageResponse heuristic on the latest user message.
      shouldUseMultiMessage: ({ context, event }) => {
        if (event.type === 'FORCE_MULTI_MESSAGE') return true;

        const lastMessage = context.messages[context.messages.length - 1];
        if (!lastMessage || lastMessage.sender !== 'user') return false;

        const shouldUse = needsMultiMessageResponse(lastMessage.content);
        console.log('🔄 Should use multi-message:', shouldUse, 'for:', lastMessage.content);
        return shouldUse;
      },
      // Delegates to the needsRealTimeData heuristic on the latest user message.
      queryNeedsRealTimeData: ({ context }) => {
        const lastMessage = context.messages[context.messages.length - 1];
        if (!lastMessage || lastMessage.sender !== 'user') return false;

        const needs = needsRealTimeData(lastMessage.content);
        console.log('🔍 Query needs real-time data:', needs, 'for:', lastMessage.content);
        return needs;
      },
      // True while further multi-message steps remain after the current one.
      hasMoreMultiMessages: ({ context }) => {
        return context.currentMultiStep! < context.totalMultiSteps! - 1;
      },
      // True when the regular chat query has any messages to analyze.
      hasExistingContext: ({ context }) => {
        return context.regularMessages && context.regularMessages.length > 0;
      },
    },
    // Named actions referenced by string in the config above.
    actions: {
      // Append the user's message; resets input, error, and MCP data.
      addUserMessage: assign(({ context, event }) => {
        if (event.type !== 'SEND_MESSAGE' && event.type !== 'FORCE_MULTI_MESSAGE') return context;
        const newMessage: Message = {
          id: Date.now().toString(),
          content: event.message,
          sender: 'user',
          timestamp: new Date(),
        };
        return {
          ...context,
          messages: [...context.messages, newMessage],
          currentInput: '',
          error: null,
          mcpData: undefined,
          messageToRespondTo: undefined, // Clear the message to respond to when adding new user message
        };
      }),
      // Append the AI's response, extracting text from the actor-done event.
      addAIMessage: assign(({ context, event }) => {
        console.log('🤖 addAIMessage called with event:', event);

        // Extract the AI response content from the event
        let aiResponseContent = '';

        // Handle different event formats
        if (event && typeof event === 'object') {
          if ('output' in event) {
            aiResponseContent = (event as any).output;
          } else if ('data' in event) {
            aiResponseContent = (event as any).data;
          } else if (typeof event === 'string') {
            // NOTE(review): this branch is unreachable — `event` was already
            // narrowed to 'object' above; the outer else-if below covers strings.
            // Direct string response from generateAIResponse actor
            aiResponseContent = event;
          } else {
            console.error('🤖 Could not extract AI response content from event:', event);
            aiResponseContent = 'Sorry, I encountered an error generating the response.';
          }
        } else if (typeof event === 'string') {
          // Direct string response from generateAIResponse actor
          aiResponseContent = event;
        } else {
          console.error('🤖 Could not extract AI response content from event:', event);
          aiResponseContent = 'Sorry, I encountered an error generating the response.';
        }

        console.log('🤖 AI response content:', aiResponseContent.substring(0, 100) + '...');

        const newMessage: Message = {
          id: (Date.now() + 1).toString(),
          content: aiResponseContent,
          sender: 'ai',
          timestamp: new Date(),
        };

        console.log('🤖 Adding AI message to context:', newMessage);

        return {
          ...context,
          messages: [...context.messages, newMessage],
          isTyping: false,
          error: null,
          messageToRespondTo: undefined, // Clear the message to respond to after processing
        };
      }),
      // Mirror the input field into context.
      updateInput: assign(({ context, event }) => {
        if (event.type !== 'INPUT_CHANGE') return context;
        return {
          ...context,
          currentInput: event.value,
        };
      }),
      // Replace the cached regular-chat messages used for prompt context.
      updateRegularMessages: assign(({ context, event }) => {
        if (event.type !== 'UPDATE_REGULAR_MESSAGES') return context;
        return {
          ...context,
          regularMessages: event.messages || [],
        };
      }),
      // Enter multi-message mode at step 0.
      setMultiMessageMode: assign(({ context }) => ({
        ...context,
        multiMessageMode: true,
        currentMultiStep: 0,
      })),
      // Store the plan returned by the generateMultiMessagePlan actor.
      setMultiMessagePlan: assign(({ context, event }) => {
        const plan = (event as unknown as { output: { plan: string[]; totalSteps: number } }).output;
        console.log('📝 Multi-message plan created:', plan.totalSteps, 'steps');
        return {
          ...context,
          multiMessagePlan: plan.plan,
          totalMultiSteps: plan.totalSteps,
        };
      }),
      // Advance to the next step of the multi-message plan.
      incrementMultiStep: assign(({ context }) => ({
        ...context,
        currentMultiStep: context.currentMultiStep! + 1,
      })),
      logContinueProcessing: () => {
        console.log('🤖 Continue processing triggered - machine will stay in idle state');
      },
      // Clear all multi-message bookkeeping after the final step.
      resetMultiMessage: assign(({ context }) => ({
        ...context,
        multiMessageMode: false,
        currentMultiStep: 0,
        totalMultiSteps: 0,
        multiMessagePlan: [],
        isTyping: false,
      })),
      setTyping: assign(({ context }) => ({
        ...context,
        isTyping: true,
      })),
      // Store the fetched MCP (web search / news) data for prompt enrichment.
      setMCPData: assign(({ context, event }) => {
        console.log('📊 MCP data fetched successfully');
        return {
          ...context,
          mcpData: (event as unknown as { output: MCPData }).output,
        };
      }),
      // Normalize actor errors into a user-friendly message on context.error.
      setError: assign(({ context, event }) => {
        console.error('❌ AI processing error:', event);
        let errorMessage = 'An unexpected error occurred';

        const error = (event as { error: unknown }).error;
        if (error instanceof Error) {
          errorMessage = error.message;
        } else if (typeof error === 'string') {
          errorMessage = error;
        }

        // Map known error substrings to friendlier, actionable messages.
        if (errorMessage.includes('API key')) {
          errorMessage = 'API key not configured. Please add your API key in Model Settings.';
        } else if (errorMessage.includes('rate limit')) {
          errorMessage = 'Rate limit exceeded. Please try again in a moment.';
        } else if (errorMessage.includes('network') || errorMessage.includes('fetch')) {
          errorMessage = 'Network error. Please check your connection and try again.';
        }

        return {
          ...context,
          error: errorMessage,
          isTyping: false,
        };
      }),
      clearError: assign(({ context }) => ({
        ...context,
        error: null,
      })),
      // Inject a synthetic user message asking the model to analyze the
      // existing conversation (used by ANALYZE_EXISTING_CONTEXT).
      addContextAnalysisMessage: assign(({ context }) => {
        const newMessage: Message = {
          id: (Date.now() + 2).toString(),
          content:
            'Please analyze the existing conversation and provide insights, continue the discussion, or ask relevant follow-up questions based on the context.',
          sender: 'user',
          timestamp: new Date(),
        };
        return {
          ...context,
          messages: [...context.messages, newMessage],
          currentInput: '',
          error: null,
        };
      }),
      // Stage an auto-response target on context.messageToRespondTo without
      // appending it to the messages array (avoids duplicate user messages).
      addAutoResponseMessage: assign(({ context, event }) => {
        if (event.type !== 'AUTO_RESPOND_TO_MESSAGE') return context;

        console.log('🤖 addAutoResponseMessage called with event:', event);
        console.log('🤖 Current context before update:', {
          messagesCount: context.messages.length,
          hasMessageToRespondTo: !!context.messageToRespondTo,
          messageToRespondTo: context.messageToRespondTo,
        });

        // For auto-responses, we store the message content for AI processing
        // but don't add it as a new user message to avoid duplicates
        const updatedContext = {
          ...context,
          currentInput: '',
          error: null,
          mcpData: undefined,
          // Store the message to respond to without adding it to messages array
          messageToRespondTo: event.message,
        };

        console.log('🤖 Context updated with messageToRespondTo:', event.message);
        console.log('🤖 Updated context:', {
          messagesCount: updatedContext.messages.length,
          hasMessageToRespondTo: !!updatedContext.messageToRespondTo,
          messageToRespondTo: updatedContext.messageToRespondTo,
        });

        return updatedContext;
      }),
    },
    // Promise actors invoked by the config above.
    actors: {
      // Ask the primary model to split the query into a 3-5 step JSON plan;
      // falls back to a canned 3-step plan when the response is not valid JSON.
      generateMultiMessagePlan: fromPromise(
        async ({ input }: { input: { query: string; regularMessages: any[] } }) => {
          try {
            console.log('📋 Generating multi-message plan for:', input.query);

            // Check if we have an available AI provider
            if (!API_CONFIG.availableProvider) {
              throw new Error(
                'No AI API key configured. Please set VITE_ANTHROPIC_API_KEY or VITE_OPENAI_API_KEY in your environment.',
              );
            }

            console.log(`🤖 Using ${API_CONFIG.availableProvider} provider for multi-message planning`);

            // Add conversation context if available
            let systemPrompt = `You are a planning assistant. Break down complex queries into 3-5 independent messages that can be sent sequentially.

Each message should be a complete, standalone response that builds upon the previous ones.

Return ONLY a JSON object in this exact format:
{
"plan": ["First message topic/focus", "Second message topic/focus", "Third message topic/focus"],
"totalSteps": 3
}

Keep each step concise but descriptive. Make sure the breakdown makes sense for step-by-step explanations.`;

            if (input.regularMessages && input.regularMessages.length > 0) {
              const conversationContext = getConversationContext(input.regularMessages);
              systemPrompt = `${systemPrompt}\n\n${conversationContext}`;
            }

            const response = await makeApiCall(API_CONFIG.primaryEndpoint, {
              method: 'POST',
              headers: {
                'Content-Type': 'application/json',
              },
              body: JSON.stringify({
                model: API_CONFIG.primaryModel,
                max_tokens: 500,
                system: systemPrompt,
                messages: [
                  {
                    role: 'user',
                    content: `Please create a multi-message plan for this query: "${input.query}"`,
                  },
                ],
              }),
            });

            const data = await response.json();
            const planText = data.content[0]?.text || '';

            // Parse the JSON response
            try {
              const plan = JSON.parse(planText);
              console.log('✅ Multi-message plan created:', plan);
              return plan;
            } catch {
              // Fallback plan if parsing fails
              return {
                plan: [
                  'Introduction and overview',
                  'Detailed explanation with examples',
                  'Summary and next steps',
                ],
                totalSteps: 3,
              };
            }
          } catch (error) {
            console.error('🚨 Multi-message planning error:', error);
            throw error;
          }
        },
      ),

      // Generate one step of a planned multi-message reply, enriched with
      // conversation context and MCP data when available.
      generateMultiMessage: fromPromise(
        async ({
          input,
        }: {
          input: {
            messages: Message[];
            plan?: string[];
            currentStep: number;
            mcpData?: MCPData;
            regularMessages: any[];
          };
        }) => {
          console.log(`🔄 Generating multi-message ${input.currentStep + 1}/${input.plan?.length || 1}`);

          try {
            // Check if we have an available AI provider
            if (!API_CONFIG.availableProvider) {
              throw new Error(
                'No AI API key configured. Please set VITE_ANTHROPIC_API_KEY or VITE_OPENAI_API_KEY in your environment.',
              );
            }

            console.log(`🤖 Using ${API_CONFIG.availableProvider} provider for multi-message generation`);
            const userMessages = input.messages.filter((msg) => msg.sender === 'user');
            const latestUserMessage = userMessages[userMessages.length - 1];

            if (!latestUserMessage) {
              throw new Error('No user message found');
            }

            const currentStepTopic = input.plan?.[input.currentStep] || 'General response';
            const stepNumber = input.currentStep + 1;
            const totalSteps = input.plan?.length || 1;

            let prompt = `User's original question: "${latestUserMessage.content}"

This is message ${stepNumber} of ${totalSteps} in a multi-message response.
Focus specifically on: ${currentStepTopic}

Guidelines:
- Start with a clear indicator like "**Step ${stepNumber}/${totalSteps}: ${currentStepTopic}**"
- Provide a complete, standalone response for this specific aspect
- Keep it focused and detailed but not overwhelming
- Use examples, code snippets, or bullet points where helpful
- End with a brief transition to what's coming next (unless it's the final message)`;

            // Add conversation context if available
            if (input.regularMessages && input.regularMessages.length > 0) {
              const conversationContext = getConversationContext(input.regularMessages);
              prompt = `${conversationContext}${prompt}`;
            }

            // Add MCP context if available
            if (input.mcpData) {
              const mcpContext = formatMCPDataForAgent(input.mcpData);
              if (mcpContext) {
                prompt = `${mcpContext}\n\n${prompt}`;
              }
            }

            const response = await makeApiCall(API_CONFIG.primaryEndpoint, {
              method: 'POST',
              headers: {
                'Content-Type': 'application/json',
              },
              body: JSON.stringify({
                model: API_CONFIG.primaryModel,
                max_tokens: 800,
                system: getSystemPrompt(API_CONFIG.availableProvider || 'anthropic', false),
                messages: [
                  ...input.messages.slice(-5).map((msg) => ({
                    role: msg.sender === 'user' ? 'user' : 'assistant',
                    content: msg.content,
                  })),
                  {
                    role: 'user',
                    content: prompt,
                  },
                ],
              }),
            });

            const data = await response.json();
            return data.content[0]?.text || "Sorry, I couldn't generate a response for this step.";
          } catch (error) {
            console.error('🚨 Multi-message generation error:', error);
            throw error;
          }
        },
      ),

      // Fetch real-time web search / news data for the query via MCP.
      fetchMCPData: fromPromise(async ({ input }: { input: { query: string } }) => {
        try {
          console.log('🌐 Fetching MCP data for query:', input.query);

          const response = await performDirectMCPSearch(input.query, true, true);
          console.log('✅ MCP data fetched:', response.summary);

          return response;
        } catch (error) {
          console.error('🚨 MCP fetch error:', error);
          throw error;
        }
      }),

      // Generate a single AI response (with 15s abort timeout), enriching the
      // prompt with conversation context and MCP data; rethrows normalized
      // Error instances so setError can map them to user-facing messages.
      generateAIResponse: fromPromise(
        async ({
          input,
        }: {
          input: {
            messages: Message[];
            mcpData?: MCPData;
            regularMessages: any[];
            messageToRespondTo?: string;
          };
        }) => {
          console.log('🤖 Generating response using server-side AI proxy');
          console.log('🤖 Input received:', {
            messagesCount: input.messages.length,
            hasMCPData: !!input.mcpData,
            regularMessagesCount: input.regularMessages?.length || 0,
            messageToRespondTo: input.messageToRespondTo,
          });

          try {
            // Check if we have an available AI provider
            if (!API_CONFIG.availableProvider) {
              throw new Error(
                'No AI API key configured. Please set VITE_ANTHROPIC_API_KEY or VITE_OPENAI_API_KEY in your environment.',
              );
            }

            console.log(`🤖 Using ${API_CONFIG.availableProvider} provider for AI response generation`);

            // Get the latest user message or the message to respond to
            let prompt: string;
            let userMessages = input.messages.filter((msg) => msg.sender === 'user');

            if (input.messageToRespondTo) {
              // Use the message to respond to for auto-responses
              prompt = input.messageToRespondTo;
              console.log('🤖 Auto-responding to message:', prompt.substring(0, 50) + '...');
            } else if (userMessages.length > 0) {
              const latestUserMessage = userMessages[userMessages.length - 1];
              prompt = latestUserMessage.content;
            } else {
              // If no user messages, this might be an auto-response
              prompt = 'Please provide a helpful response to the conversation context.';
            }

            // Add conversation context if available
            if (input.regularMessages && input.regularMessages.length > 0) {
              const conversationContext = getConversationContext(input.regularMessages);
              prompt = `${conversationContext}User question: ${prompt}`;
              console.log(
                '🚀 Enhanced prompt with conversation context - Recent messages:',
                input.regularMessages.length,
              );
            }

            // Add MCP context if available
            if (input.mcpData) {
              const mcpContext = formatMCPDataForAgent(input.mcpData);
              if (mcpContext) {
                prompt = `${mcpContext}\n\nUser question: ${prompt}`;
                console.log(
                  '🚀 Enhanced prompt with MCP data - Web results:',
                  input.mcpData.searchResults?.length || 0,
                  'News articles:',
                  input.mcpData.newsArticles?.length || 0,
                );
              }
            }

            // Use Vite proxy for AI API calls (avoids CORS issues)
            const controller = new AbortController();
            const timeoutId = setTimeout(() => controller.abort(), 15000); // Increased to 15s timeout for better reliability

            try {
              const response = await makeApiCall(API_CONFIG.primaryEndpoint, {
                method: 'POST',
                headers: {
                  'Content-Type': 'application/json',
                },
                body: JSON.stringify({
                  model: API_CONFIG.primaryModel,
                  max_tokens: 300, // Further reduced for faster responses
                  system: getSystemPrompt(API_CONFIG.availableProvider || 'anthropic', true),
                  messages: [
                    // Only include the last 2 messages for faster processing
                    ...input.messages.slice(-2).map((msg) => ({
                      role: msg.sender === 'user' ? 'user' : 'assistant',
                      content: msg.content,
                    })),
                    {
                      role: 'user',
                      content: prompt,
                    },
                  ],
                }),
                signal: controller.signal,
              });

              clearTimeout(timeoutId);

              const data = await response.json();
              console.log('🤖 AI API response received:', data);

              // Anthropic responses use data.content[0].text; OpenAI-style
              // responses use data.choices[0].message.content.
              const aiResponseText =
                data.content[0]?.text ||
                data.choices?.[0]?.message?.content ||
                "Sorry, I couldn't generate a response.";
              console.log('🤖 Extracted AI response text:', aiResponseText.substring(0, 100) + '...');

              console.log(
                '🤖 ✅ generateAIResponse actor completing successfully with text length:',
                aiResponseText.length,
              );
              return aiResponseText;
            } catch (error) {
              clearTimeout(timeoutId);
              if (error.name === 'AbortError') {
                throw new Error('AI response generation timed out. Please try again.');
              }
              throw error;
            }
          } catch (error) {
            console.error('🚨 AI generation error:', error);

            if (error instanceof Error) {
              if (error.message.includes('API key')) {
                throw new Error(
                  'AI API key is not configured. Please set your API keys in the environment variables.',
                );
              }
              if (error.message.includes('rate limit')) {
                throw new Error('Rate limit exceeded. Please try again in a moment.');
              }
              if (error.message.includes('network') || error.message.includes('fetch')) {
                throw new Error(
                  'Network error. Please check your connection and ensure your Vite dev server is running on localhost:3011.',
                );
              }
              throw error; // Re-throw the Error instance
            }

            // If error is not an Error instance, create a proper Error with string message
            const errorMessage =
              typeof error === 'string'
                ? error
                : error && typeof error === 'object' && 'message' in error
                  ? String(error.message)
                  : 'An unexpected error occurred while generating the response';
            throw new Error(errorMessage);
          }
        },
      ),
    },
  },
);
|