nitrostack 1.0.71 → 1.0.73

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (253)
  1. package/dist/auth/api-key.js.map +1 -1
  2. package/dist/auth/client.js.map +1 -1
  3. package/dist/auth/index.d.ts +2 -1
  4. package/dist/auth/index.d.ts.map +1 -1
  5. package/dist/auth/index.js +3 -0
  6. package/dist/auth/index.js.map +1 -1
  7. package/dist/auth/middleware.d.ts +1 -1
  8. package/dist/auth/middleware.d.ts.map +1 -1
  9. package/dist/auth/middleware.js.map +1 -1
  10. package/dist/auth/secure-secret.d.ts +136 -0
  11. package/dist/auth/secure-secret.d.ts.map +1 -0
  12. package/dist/auth/secure-secret.js +182 -0
  13. package/dist/auth/secure-secret.js.map +1 -0
  14. package/dist/auth/server-metadata.d.ts.map +1 -1
  15. package/dist/auth/server-metadata.js.map +1 -1
  16. package/dist/auth/simple-jwt.d.ts +100 -14
  17. package/dist/auth/simple-jwt.d.ts.map +1 -1
  18. package/dist/auth/simple-jwt.js +19 -9
  19. package/dist/auth/simple-jwt.js.map +1 -1
  20. package/dist/auth/token-store.js +1 -1
  21. package/dist/auth/token-store.js.map +1 -1
  22. package/dist/auth/token-validation.js +1 -1
  23. package/dist/auth/token-validation.js.map +1 -1
  24. package/dist/cli/commands/build.js +1 -1
  25. package/dist/cli/commands/build.js.map +1 -1
  26. package/dist/cli/commands/generate-types.js +12 -12
  27. package/dist/cli/commands/generate-types.js.map +1 -1
  28. package/dist/cli/commands/generate.d.ts +8 -1
  29. package/dist/cli/commands/generate.d.ts.map +1 -1
  30. package/dist/cli/commands/generate.js +13 -12
  31. package/dist/cli/commands/generate.js.map +1 -1
  32. package/dist/cli/commands/init.js +1 -1
  33. package/dist/cli/commands/init.js.map +1 -1
  34. package/dist/cli/commands/upgrade.d.ts +10 -0
  35. package/dist/cli/commands/upgrade.d.ts.map +1 -0
  36. package/dist/cli/commands/upgrade.js +221 -0
  37. package/dist/cli/commands/upgrade.js.map +1 -0
  38. package/dist/cli/index.js +7 -0
  39. package/dist/cli/index.js.map +1 -1
  40. package/dist/core/app-decorator.d.ts +4 -3
  41. package/dist/core/app-decorator.d.ts.map +1 -1
  42. package/dist/core/app-decorator.js +67 -28
  43. package/dist/core/app-decorator.js.map +1 -1
  44. package/dist/core/builders.d.ts +19 -7
  45. package/dist/core/builders.d.ts.map +1 -1
  46. package/dist/core/builders.js +15 -8
  47. package/dist/core/builders.js.map +1 -1
  48. package/dist/core/component.d.ts +8 -8
  49. package/dist/core/component.d.ts.map +1 -1
  50. package/dist/core/component.js +3 -2
  51. package/dist/core/component.js.map +1 -1
  52. package/dist/core/config-module.d.ts +11 -4
  53. package/dist/core/config-module.d.ts.map +1 -1
  54. package/dist/core/config-module.js +1 -1
  55. package/dist/core/config-module.js.map +1 -1
  56. package/dist/core/decorators/cache.decorator.d.ts +9 -9
  57. package/dist/core/decorators/cache.decorator.d.ts.map +1 -1
  58. package/dist/core/decorators/cache.decorator.js +3 -3
  59. package/dist/core/decorators/cache.decorator.js.map +1 -1
  60. package/dist/core/decorators/health-check.decorator.d.ts +3 -3
  61. package/dist/core/decorators/health-check.decorator.d.ts.map +1 -1
  62. package/dist/core/decorators/health-check.decorator.js +2 -2
  63. package/dist/core/decorators/health-check.decorator.js.map +1 -1
  64. package/dist/core/decorators/rate-limit.decorator.d.ts +5 -4
  65. package/dist/core/decorators/rate-limit.decorator.d.ts.map +1 -1
  66. package/dist/core/decorators/rate-limit.decorator.js +3 -3
  67. package/dist/core/decorators/rate-limit.decorator.js.map +1 -1
  68. package/dist/core/decorators.d.ts +47 -29
  69. package/dist/core/decorators.d.ts.map +1 -1
  70. package/dist/core/decorators.js +9 -9
  71. package/dist/core/decorators.js.map +1 -1
  72. package/dist/core/di/container.d.ts +21 -4
  73. package/dist/core/di/container.d.ts.map +1 -1
  74. package/dist/core/di/container.js +11 -7
  75. package/dist/core/di/container.js.map +1 -1
  76. package/dist/core/di/injectable.decorator.d.ts +5 -3
  77. package/dist/core/di/injectable.decorator.d.ts.map +1 -1
  78. package/dist/core/di/injectable.decorator.js.map +1 -1
  79. package/dist/core/errors.d.ts +4 -4
  80. package/dist/core/errors.d.ts.map +1 -1
  81. package/dist/core/errors.js.map +1 -1
  82. package/dist/core/events/event-emitter.d.ts +3 -3
  83. package/dist/core/events/event-emitter.d.ts.map +1 -1
  84. package/dist/core/events/event-emitter.js.map +1 -1
  85. package/dist/core/events/event.decorator.d.ts +5 -5
  86. package/dist/core/events/event.decorator.d.ts.map +1 -1
  87. package/dist/core/events/event.decorator.js +10 -6
  88. package/dist/core/events/event.decorator.js.map +1 -1
  89. package/dist/core/events/log-emitter.d.ts +7 -1
  90. package/dist/core/events/log-emitter.d.ts.map +1 -1
  91. package/dist/core/events/log-emitter.js.map +1 -1
  92. package/dist/core/filters/exception-filter.decorator.d.ts +5 -5
  93. package/dist/core/filters/exception-filter.decorator.d.ts.map +1 -1
  94. package/dist/core/filters/exception-filter.decorator.js +3 -3
  95. package/dist/core/filters/exception-filter.decorator.js.map +1 -1
  96. package/dist/core/filters/exception-filter.interface.d.ts +14 -5
  97. package/dist/core/filters/exception-filter.interface.d.ts.map +1 -1
  98. package/dist/core/guards/apikey.guard.d.ts +1 -1
  99. package/dist/core/guards/apikey.guard.d.ts.map +1 -1
  100. package/dist/core/guards/guard.interface.d.ts +1 -1
  101. package/dist/core/guards/guard.interface.d.ts.map +1 -1
  102. package/dist/core/guards/jwt.guard.d.ts +1 -1
  103. package/dist/core/guards/jwt.guard.d.ts.map +1 -1
  104. package/dist/core/guards/oauth.guard.d.ts +1 -1
  105. package/dist/core/guards/oauth.guard.d.ts.map +1 -1
  106. package/dist/core/guards/use-guards.decorator.d.ts +3 -3
  107. package/dist/core/guards/use-guards.decorator.d.ts.map +1 -1
  108. package/dist/core/guards/use-guards.decorator.js +1 -1
  109. package/dist/core/guards/use-guards.decorator.js.map +1 -1
  110. package/dist/core/index.d.ts +2 -2
  111. package/dist/core/index.d.ts.map +1 -1
  112. package/dist/core/index.js.map +1 -1
  113. package/dist/core/interceptors/interceptor.decorator.d.ts +4 -4
  114. package/dist/core/interceptors/interceptor.decorator.d.ts.map +1 -1
  115. package/dist/core/interceptors/interceptor.decorator.js +2 -2
  116. package/dist/core/interceptors/interceptor.decorator.js.map +1 -1
  117. package/dist/core/interceptors/interceptor.interface.d.ts +3 -3
  118. package/dist/core/interceptors/interceptor.interface.d.ts.map +1 -1
  119. package/dist/core/logger.d.ts.map +1 -1
  120. package/dist/core/logger.js.map +1 -1
  121. package/dist/core/middleware/middleware.decorator.d.ts +4 -4
  122. package/dist/core/middleware/middleware.decorator.d.ts.map +1 -1
  123. package/dist/core/middleware/middleware.decorator.js +2 -2
  124. package/dist/core/middleware/middleware.decorator.js.map +1 -1
  125. package/dist/core/middleware/middleware.interface.d.ts +3 -3
  126. package/dist/core/middleware/middleware.interface.d.ts.map +1 -1
  127. package/dist/core/module.d.ts +33 -14
  128. package/dist/core/module.d.ts.map +1 -1
  129. package/dist/core/module.js +11 -6
  130. package/dist/core/module.js.map +1 -1
  131. package/dist/core/oauth-module.d.ts +9 -3
  132. package/dist/core/oauth-module.d.ts.map +1 -1
  133. package/dist/core/oauth-module.js +4 -3
  134. package/dist/core/oauth-module.js.map +1 -1
  135. package/dist/core/pipes/pipe.decorator.d.ts +14 -5
  136. package/dist/core/pipes/pipe.decorator.d.ts.map +1 -1
  137. package/dist/core/pipes/pipe.decorator.js +2 -2
  138. package/dist/core/pipes/pipe.decorator.js.map +1 -1
  139. package/dist/core/pipes/pipe.interface.d.ts +9 -4
  140. package/dist/core/pipes/pipe.interface.d.ts.map +1 -1
  141. package/dist/core/prompt.d.ts +13 -4
  142. package/dist/core/prompt.d.ts.map +1 -1
  143. package/dist/core/prompt.js +2 -2
  144. package/dist/core/prompt.js.map +1 -1
  145. package/dist/core/resource.d.ts +7 -2
  146. package/dist/core/resource.d.ts.map +1 -1
  147. package/dist/core/resource.js +2 -2
  148. package/dist/core/resource.js.map +1 -1
  149. package/dist/core/server.d.ts +49 -3
  150. package/dist/core/server.d.ts.map +1 -1
  151. package/dist/core/server.js +61 -34
  152. package/dist/core/server.js.map +1 -1
  153. package/dist/core/tool.d.ts +44 -16
  154. package/dist/core/tool.d.ts.map +1 -1
  155. package/dist/core/tool.js +19 -6
  156. package/dist/core/tool.js.map +1 -1
  157. package/dist/core/transports/discovery-http-server.d.ts +7 -1
  158. package/dist/core/transports/discovery-http-server.d.ts.map +1 -1
  159. package/dist/core/transports/discovery-http-server.js.map +1 -1
  160. package/dist/core/transports/http-server.d.ts +2 -2
  161. package/dist/core/transports/http-server.d.ts.map +1 -1
  162. package/dist/core/transports/http-server.js +1 -1
  163. package/dist/core/transports/http-server.js.map +1 -1
  164. package/dist/core/transports/streamable-http.d.ts +4 -4
  165. package/dist/core/transports/streamable-http.d.ts.map +1 -1
  166. package/dist/core/transports/streamable-http.js +1 -1
  167. package/dist/core/transports/streamable-http.js.map +1 -1
  168. package/dist/core/types.d.ts +87 -15
  169. package/dist/core/types.d.ts.map +1 -1
  170. package/dist/core/widgets/widget-registry.d.ts +2 -2
  171. package/dist/core/widgets/widget-registry.d.ts.map +1 -1
  172. package/dist/core/widgets/widget-registry.js +1 -1
  173. package/dist/core/widgets/widget-registry.js.map +1 -1
  174. package/dist/testing/index.d.ts +44 -17
  175. package/dist/testing/index.d.ts.map +1 -1
  176. package/dist/testing/index.js +5 -8
  177. package/dist/testing/index.js.map +1 -1
  178. package/dist/ui-next/index.d.ts +1 -1
  179. package/dist/ui-next/index.d.ts.map +1 -1
  180. package/dist/ui-next/index.js.map +1 -1
  181. package/dist/widgets/hooks/useWidgetSDK.d.ts +5 -5
  182. package/dist/widgets/runtime/WidgetLayout.js.map +1 -1
  183. package/dist/widgets/sdk.d.ts +5 -5
  184. package/dist/widgets/sdk.d.ts.map +1 -1
  185. package/dist/widgets/sdk.js.map +1 -1
  186. package/package.json +1 -1
  187. package/src/studio/app/api/auth/fetch-metadata/route.ts +3 -2
  188. package/src/studio/app/api/auth/register-client/route.ts +3 -2
  189. package/src/studio/app/api/chat/route.ts +33 -17
  190. package/src/studio/app/api/health/checks/route.ts +5 -4
  191. package/src/studio/app/api/init/route.ts +3 -2
  192. package/src/studio/app/api/ping/route.ts +3 -2
  193. package/src/studio/app/api/prompts/[name]/route.ts +4 -3
  194. package/src/studio/app/api/prompts/route.ts +3 -2
  195. package/src/studio/app/api/resources/[...uri]/route.ts +3 -2
  196. package/src/studio/app/api/resources/route.ts +3 -2
  197. package/src/studio/app/api/roots/route.ts +3 -2
  198. package/src/studio/app/api/sampling/route.ts +3 -2
  199. package/src/studio/app/api/tools/[name]/call/route.ts +3 -2
  200. package/src/studio/app/api/tools/route.ts +4 -3
  201. package/src/studio/app/api/widget-examples/route.ts +5 -4
  202. package/src/studio/app/auth/callback/page.tsx +9 -8
  203. package/src/studio/app/chat/page.tsx +1535 -468
  204. package/src/studio/app/chat/page.tsx.backup +1046 -187
  205. package/src/studio/app/globals.css +361 -191
  206. package/src/studio/app/health/page.tsx +73 -77
  207. package/src/studio/app/layout.tsx +9 -11
  208. package/src/studio/app/logs/page.tsx +31 -32
  209. package/src/studio/app/page.tsx +136 -232
  210. package/src/studio/app/prompts/page.tsx +115 -97
  211. package/src/studio/app/resources/page.tsx +115 -124
  212. package/src/studio/app/settings/page.tsx +1083 -127
  213. package/src/studio/app/tools/page.tsx +343 -0
  214. package/src/studio/components/EnlargeModal.tsx +76 -65
  215. package/src/studio/components/LogMessage.tsx +6 -6
  216. package/src/studio/components/MarkdownRenderer.tsx +246 -349
  217. package/src/studio/components/Sidebar.tsx +165 -210
  218. package/src/studio/components/SplashScreen.tsx +109 -0
  219. package/src/studio/components/ToolCard.tsx +50 -41
  220. package/src/studio/components/VoiceOrbOverlay.tsx +475 -0
  221. package/src/studio/components/WidgetErrorBoundary.tsx +48 -0
  222. package/src/studio/components/WidgetRenderer.tsx +169 -211
  223. package/src/studio/components/ops/OpsCanvas.tsx +748 -0
  224. package/src/studio/components/ops/OpsNodeDetailPanel.tsx +150 -0
  225. package/src/studio/components/ops/OpsSummaryBar.tsx +90 -0
  226. package/src/studio/components/ops/index.ts +5 -0
  227. package/src/studio/components/ops/nodes/BaseNode.tsx +65 -0
  228. package/src/studio/components/ops/nodes/LLMCallNode.tsx +34 -0
  229. package/src/studio/components/ops/nodes/LLMResponseNode.tsx +33 -0
  230. package/src/studio/components/ops/nodes/ToolCallNode.tsx +30 -0
  231. package/src/studio/components/ops/nodes/ToolResultNode.tsx +43 -0
  232. package/src/studio/components/ops/nodes/UserPromptNode.tsx +34 -0
  233. package/src/studio/components/ops/nodes/WidgetRenderNode.tsx +23 -0
  234. package/src/studio/components/ops/nodes/index.ts +8 -0
  235. package/src/studio/components/tools/ToolsCanvas.tsx +327 -0
  236. package/src/studio/lib/api.ts +61 -42
  237. package/src/studio/lib/http-client-transport.ts +2 -2
  238. package/src/studio/lib/llm-service.ts +126 -47
  239. package/src/studio/lib/mcp-client.ts +9 -6
  240. package/src/studio/lib/ops-store.ts +427 -0
  241. package/src/studio/lib/ops-tracker.ts +416 -0
  242. package/src/studio/lib/ops-types.ts +164 -0
  243. package/src/studio/lib/store.ts +23 -11
  244. package/src/studio/lib/types.ts +228 -38
  245. package/src/studio/lib/widget-loader.ts +2 -2
  246. package/src/studio/package-lock.json +3303 -0
  247. package/src/studio/package.json +3 -1
  248. package/src/studio/public/NitroStudio Isotype Color.png +0 -0
  249. package/src/studio/tailwind.config.ts +63 -17
  250. package/templates/typescript-oauth/src/modules/flights/flights.prompts.ts +19 -22
  251. package/dist/cli/build-widgets.mjs +0 -165
  252. package/src/studio/app/auth/page.tsx +0 -560
  253. package/src/studio/app/ping/page.tsx +0 -209
@@ -1,27 +1,130 @@
  'use client';

- import { useEffect, useRef, useState } from 'react';
+ import { useEffect, useRef, useState, useCallback } from 'react';
  import { useStudioStore } from '@/lib/store';
+ import { useOpsStore } from '@/lib/ops-store';
+ import { opsTracker, clearOpsSession } from '@/lib/ops-tracker';
+ import type { LLMProvider } from '@/lib/ops-types';
  import { api } from '@/lib/api';
  import { WidgetRenderer } from '@/components/WidgetRenderer';
+ import { WidgetErrorBoundary } from '@/components/WidgetErrorBoundary';
  import { MarkdownRenderer } from '@/components/MarkdownRenderer';
+ import { VoiceOrbOverlay, MiniVoiceOrb } from '@/components/VoiceOrbOverlay';
+ import { OpsCanvas } from '@/components/ops/OpsCanvas';
  import type { ChatMessage, Tool, ToolCall, Prompt } from '@/lib/types';
  import {
- Bot,
- Settings,
- Trash2,
- Image as ImageIcon,
- Send,
- Wrench,
- Save,
- X,
- Sparkles,
- FileText,
- Play,
- ExternalLink,
- Info,
- MoreVertical
- } from 'lucide-react';
+ SparklesIcon,
+ Cog6ToothIcon,
+ TrashIcon,
+ PhotoIcon,
+ PaperAirplaneIcon,
+ WrenchScrewdriverIcon,
+ BookmarkIcon,
+ XMarkIcon,
+ DocumentTextIcon,
+ PlayIcon,
+ ArrowTopRightOnSquareIcon,
+ InformationCircleIcon,
+ EllipsisVerticalIcon,
+ MicrophoneIcon,
+ SpeakerWaveIcon,
+ StopIcon,
+ } from '@heroicons/react/24/outline';
+
+ // Add type for webkitSpeechRecognition
+ declare global {
+ interface Window {
+ webkitSpeechRecognition?: typeof SpeechRecognition;
+ }
+ }
+
+ // =============================================================================
+ // Token Optimization: Reduce token count for LLM API calls
+ // =============================================================================
+
+ const MAX_HISTORY_MESSAGES = 20; // Sliding window size
+ const MAX_TOOL_RESULT_LENGTH = 2000; // Truncate large tool results
+ const MAX_CONTENT_LENGTH = 4000; // Truncate very long content
+
+ /**
+ * Optimize messages for LLM to reduce token count while preserving context
+ * - Strips widget data (result) from toolCalls (only needed for UI)
+ * - Truncates large tool results
+ * - Applies sliding window for message history
+ * - Removes file data from old messages
+ */
+ function optimizeMessagesForLLM(messages: ChatMessage[]): ChatMessage[] {
+ // Apply sliding window - keep last N messages
+ // But always keep system messages and the most recent exchange
+ const recentMessages = messages.length > MAX_HISTORY_MESSAGES
+ ? messages.slice(-MAX_HISTORY_MESSAGES)
+ : messages;
+
+ return recentMessages.map((msg, idx) => {
+ const isLastMessage = idx === recentMessages.length - 1;
+
+ const cleaned: ChatMessage = {
+ role: msg.role,
+ content: msg.content || '',
+ };
+
+ // Strip result from toolCalls - LLM already gets results via tool role messages
+ // The result is only needed for UI widget rendering, not for LLM context
+ if (msg.toolCalls && msg.toolCalls.length > 0) {
+ cleaned.toolCalls = msg.toolCalls.map(tc => ({
+ id: tc.id,
+ name: tc.name,
+ arguments: tc.arguments,
+ // result intentionally omitted - saves significant tokens!
+ }));
+ }
+
+ if (msg.toolCallId) {
+ cleaned.toolCallId = msg.toolCallId;
+ }
+
+ // Truncate large tool results to save tokens
+ if (msg.role === 'tool' && cleaned.content.length > MAX_TOOL_RESULT_LENGTH) {
+ // Try to parse and summarize JSON results
+ try {
+ const parsed = JSON.parse(cleaned.content);
+ if (Array.isArray(parsed)) {
+ // For arrays (like flight results), keep first few items
+ const truncated = parsed.slice(0, 3);
+ cleaned.content = JSON.stringify(truncated) +
+ `\n[... ${parsed.length - 3} more items truncated for efficiency]`;
+ } else if (typeof parsed === 'object') {
+ // For objects, stringify with limit
+ cleaned.content = cleaned.content.substring(0, MAX_TOOL_RESULT_LENGTH) +
+ '\n[Result truncated for context efficiency]';
+ }
+ } catch {
+ // Not JSON, just truncate
+ cleaned.content = cleaned.content.substring(0, MAX_TOOL_RESULT_LENGTH) +
+ '\n[Result truncated for context efficiency]';
+ }
+ }
+
+ // Truncate very long assistant/user content (rare but possible)
+ if ((msg.role === 'assistant' || msg.role === 'user') &&
+ cleaned.content.length > MAX_CONTENT_LENGTH) {
+ cleaned.content = cleaned.content.substring(0, MAX_CONTENT_LENGTH) +
+ '\n[Content truncated]';
+ }
+
+ // Only include file data for the CURRENT message, not history
+ // Images are expensive (base64) and LLM already saw them
+ if (msg.file && isLastMessage) {
+ cleaned.file = msg.file;
+ }
+ // Old messages with files: just note that an image was attached
+ else if (msg.file && !isLastMessage) {
+ cleaned.content = cleaned.content || '[Image was attached]';
+ }
+
+ return cleaned;
+ });
+ }

  export default function ChatPage() {
  const {
@@ -34,12 +137,16 @@ export default function ChatPage() {
  setCurrentFile,
  tools,
  setTools,
+ elevenLabsApiKey,
+ setElevenLabsApiKey
  } = useStudioStore();

- // Get jwtToken and apiKey dynamically to ensure we always have the latest value
+ // Ops state
+ const { isOpsViewOpen, toggleOpsView, clearSession: clearOpsSessionStore } = useOpsStore();
+
+ // ... (existing helper methods)
  const getAuthTokens = () => {
  const state = useStudioStore.getState();
- // Check both jwtToken and OAuth token (from OAuth tab)
  const jwtToken = state.jwtToken || state.oauthState?.currentToken;
  return {
  jwtToken,
@@ -53,166 +160,471 @@
  const [prompts, setPrompts] = useState<Prompt[]>([]);
  const [selectedPrompt, setSelectedPrompt] = useState<Prompt | null>(null);
  const [promptArgs, setPromptArgs] = useState<Record<string, string>>({});
- const [fullscreenWidget, setFullscreenWidget] = useState<{ uri: string, data: any } | null>(null);
+ const [fullscreenWidget, setFullscreenWidget] = useState<{ uri: string, data: unknown } | null>(null);
+
+ // API key inputs - start empty to avoid hydration mismatch
+ const [openaiApiKeyInput, setOpenaiApiKeyInput] = useState('');
+ const [geminiApiKeyInput, setGeminiApiKeyInput] = useState('');
+
+ // Language presets for quick selection
+ const LANG_PRESETS: Record<string, { model: string; voice: string; input: string; name: string; greeting: string }> = {
+ 'en': { model: 'eleven_flash_v2_5', voice: '21m00Tcm4TlvDq8ikWAM', input: 'en-US', name: 'English', greeting: 'Hi! How can I help you today?' },
+ 'hi': { model: 'eleven_multilingual_v2', voice: 'C2S5J6WvmHnrQWjUu6Rg', input: 'hi-IN', name: 'Hindi', greeting: 'नमस्ते! मैं आज आपकी कैसे मदद कर सकता हूं?' },
+ 'es': { model: 'eleven_multilingual_v2', voice: 'ErXwobaYiN019PkySvjV', input: 'es-ES', name: 'Spanish', greeting: '¡Hola! ¿Cómo puedo ayudarte hoy?' },
+ 'fr': { model: 'eleven_multilingual_v2', voice: 'CwhRBWXzGAHq8TQ4Fs17', input: 'fr-FR', name: 'French', greeting: 'Bonjour! Comment puis-je vous aider aujourd\'hui?' },
+ 'de': { model: 'eleven_multilingual_v2', voice: 'EXAVITQu4vr4xnSDxMaL', input: 'de-DE', name: 'German', greeting: 'Hallo! Wie kann ich Ihnen heute helfen?' },
+ 'ja': { model: 'eleven_multilingual_v2', voice: 'MF3mGyEYCl7XYWbV9V6O', input: 'ja-JP', name: 'Japanese', greeting: 'こんにちは!今日はどのようにお手伝いできますか?' },
+ 'zh': { model: 'eleven_multilingual_v2', voice: 'TxGEqnHWrfWFTfGW9XjX', input: 'zh-CN', name: 'Chinese', greeting: '你好!我今天能帮你什么?' },
+ };
+
+ // Voice Mode State
+ type LLMState = 'idle' | 'listening' | 'thinking' | 'speaking';
+ const [llmState, setLlmState] = useState<LLMState>('idle');
+ const [voiceModeEnabled, setVoiceModeEnabled] = useState(false);
+ const [voiceOverlayOpen, setVoiceOverlayOpen] = useState(false);
+ const [spokenText, setSpokenText] = useState('');
+ const [voiceDisplayMode, setVoiceDisplayMode] = useState<'voice-only' | 'voice-chat'>('voice-only');
+ const [showVoiceSettings, setShowVoiceSettings] = useState(false);
+
+ // Voice Configuration - use defaults initially, load from localStorage in useEffect
+ const [voiceModel, setVoiceModel] = useState('eleven_multilingual_v2');
+ const [outputLanguage, setOutputLanguage] = useState('en');
+ const [inputLanguage, setInputLanguage] = useState('en-US');
+ const [voiceId, setVoiceId] = useState('21m00Tcm4TlvDq8ikWAM');
+
+ // Load settings from localStorage after mount (avoids hydration mismatch)
+ useEffect(() => {
+ // Voice settings
+ const savedVoiceModel = localStorage.getItem('voice_model');
+ const savedOutputLang = localStorage.getItem('output_language');
+ const savedInputLang = localStorage.getItem('input_language');
+ const savedVoiceId = localStorage.getItem('voice_id');
+
+ if (savedVoiceModel) setVoiceModel(savedVoiceModel);
+ if (savedOutputLang) setOutputLanguage(savedOutputLang);
+ if (savedInputLang) setInputLanguage(savedInputLang);
+ if (savedVoiceId) setVoiceId(savedVoiceId);
+
+ // API keys
+ const savedOpenaiKey = localStorage.getItem('openai_api_key');
+ const savedGeminiKey = localStorage.getItem('gemini_api_key');
+ if (savedOpenaiKey) setOpenaiApiKeyInput(savedOpenaiKey);
+ if (savedGeminiKey) setGeminiApiKeyInput(savedGeminiKey);
+ }, []);
+
+ // Dynamic API data
+ interface ElevenLabsModel {
+ model_id: string;
+ name: string;
+ languages?: { language_id: string; name: string }[];
+ }
+ interface ElevenLabsVoice {
+ voice_id: string;
+ name: string;
+ labels?: { accent?: string; language?: string;[key: string]: string | undefined };
+ category?: string;
+ }
+ const [availableModels, setAvailableModels] = useState<ElevenLabsModel[]>([]);
+ const [availableVoices, setAvailableVoices] = useState<ElevenLabsVoice[]>([]);
+ const [loadingVoiceData, setLoadingVoiceData] = useState(false);
+
+ const audioRef = useRef<HTMLAudioElement | null>(null);
+ const hasSpokenGreeting = useRef(false); // Prevent double greeting
+
  const messagesEndRef = useRef<HTMLDivElement>(null);
  const fileInputRef = useRef<HTMLInputElement>(null);
  const textareaRef = useRef<HTMLTextAreaElement>(null);
  const initialToolExecuted = useRef(false);

+ // Load tools and prompts on mount
  useEffect(() => {
  loadTools();
  loadPrompts();
-
- // Check if there's a suggested message from localStorage
- if (typeof window !== 'undefined') {
- const chatInput = window.localStorage.getItem('chatInput');
- if (chatInput) {
- setInputValue(chatInput);
- window.localStorage.removeItem('chatInput');
- // Focus after a short delay to ensure component is mounted
- setTimeout(() => textareaRef.current?.focus(), 100);
- }
- }
  }, []);

+ // Auto-execute initial tool when tools are loaded
  useEffect(() => {
  if (tools.length > 0 && !initialToolExecuted.current) {
  checkAndRunInitialTool();
  }
  }, [tools]);

+ // Scroll to bottom when messages change
  useEffect(() => {
  messagesEndRef.current?.scrollIntoView({ behavior: 'smooth' });
  }, [chatMessages]);

- // Auto-focus textarea on mount and after sending
+ // Fetch ElevenLabs models when settings opens
  useEffect(() => {
- textareaRef.current?.focus();
- }, [chatMessages, loading]);
+ if ((!showVoiceSettings && !showSettings) || !elevenLabsApiKey) return;

- // Auto-resize textarea based on content
- useEffect(() => {
- const textarea = textareaRef.current;
- if (textarea) {
- textarea.style.height = '44px'; // Reset to min height
- const scrollHeight = textarea.scrollHeight;
- textarea.style.height = Math.min(scrollHeight, 200) + 'px'; // Max 200px
- }
- }, [inputValue]);
+ const fetchModels = async () => {
+ try {
+ const modelsRes = await fetch('https://api.elevenlabs.io/v1/models', {
+ headers: { 'xi-api-key': elevenLabsApiKey }
+ });
+ if (modelsRes.ok) {
+ const modelsData = await modelsRes.json();
+ setAvailableModels(modelsData);
+ }
+ } catch (err) {
+ console.error('Failed to fetch ElevenLabs models:', err);
+ }
+ };

- // Listen for widget fullscreen requests
+ fetchModels();
+ }, [showVoiceSettings, showSettings, elevenLabsApiKey]);
+
+ // Fetch voices when settings opens or output language changes
  useEffect(() => {
- const handleFullscreenRequest = (event: CustomEvent) => {
- const { uri, data } = event.detail;
- setFullscreenWidget({ uri, data });
+ if ((!showVoiceSettings && !showSettings) || !elevenLabsApiKey) return;
+
+ const fetchVoices = async () => {
+ setLoadingVoiceData(true);
+ try {
+ // Map output language to ElevenLabs language code
+ const langMap: Record<string, string> = {
+ 'en': 'en', 'hi': 'hi', 'es': 'es', 'fr': 'fr', 'de': 'de',
+ 'ja': 'ja', 'ko': 'ko', 'zh': 'zh', 'pt': 'pt', 'it': 'it'
+ };
+ const langCode = langMap[outputLanguage] || 'en';
+
+ // Fetch user's own voices
+ const userVoicesRes = await fetch('https://api.elevenlabs.io/v1/voices', {
+ headers: { 'xi-api-key': elevenLabsApiKey }
+ });
+ let userVoices: ElevenLabsVoice[] = [];
+ if (userVoicesRes.ok) {
+ const data = await userVoicesRes.json();
+ userVoices = data.voices || [];
+ }
+
+ // Fetch shared voices filtered by language
+ const sharedVoicesRes = await fetch(
+ `https://api.elevenlabs.io/v1/shared-voices?language=${langCode}&page_size=50`,
+ { headers: { 'xi-api-key': elevenLabsApiKey } }
+ );
+ let sharedVoices: ElevenLabsVoice[] = [];
+ if (sharedVoicesRes.ok) {
+ const data = await sharedVoicesRes.json();
+ interface SharedVoiceData {
+ voice_id: string;
+ name: string;
+ accent?: string;
+ language?: string;
+ }
+ sharedVoices = (data.voices || []).map((v: SharedVoiceData) => ({
+ voice_id: v.voice_id,
+ name: v.name,
+ labels: { accent: v.accent || v.language },
+ category: 'shared'
+ }));
+ }
+
+ // Combine: user voices first, then shared voices
+ setAvailableVoices([...userVoices, ...sharedVoices]);
+ } catch (err) {
+ console.error('Failed to fetch ElevenLabs voices:', err);
+ } finally {
+ setLoadingVoiceData(false);
+ }
  };

- window.addEventListener('widget-fullscreen-request', handleFullscreenRequest as EventListener);
- return () => window.removeEventListener('widget-fullscreen-request', handleFullscreenRequest as EventListener);
- }, []);
+ fetchVoices();
+ }, [showVoiceSettings, elevenLabsApiKey, outputLanguage]);

- // Listen for widget tool call requests
- useEffect(() => {
- let isProcessingToolCall = false;
+ // Note: Speech recognition is now handled by VoiceOrbOverlay component

- const handleToolCall = async (event: any) => {
- // Prevent multiple simultaneous calls
- if (isProcessingToolCall) {
- console.log('⏭️ Skipping duplicate tool call');
- return;
+ // Text-to-Speech logic for new messages (when in voice mode or overlay is open)
+ useEffect(() => {
+ // Only trigger TTS if voice mode is enabled OR overlay is open
+ if ((!voiceModeEnabled && !voiceOverlayOpen) || !elevenLabsApiKey || chatMessages.length === 0) return;
+
+ const lastMessage = chatMessages[chatMessages.length - 1];
+ if (lastMessage.role === 'assistant' && lastMessage.content) {
+ // Stop any current audio
+ if (audioRef.current) {
+ audioRef.current.pause();
+ audioRef.current = null;
  }
+ // Set the text being spoken for overlay display
+ const voiceText = convertToVoiceFriendlyText(lastMessage.content);
+ setSpokenText(voiceText);
+ playTextToSpeech(voiceText);
+ }
+ }, [chatMessages, voiceModeEnabled, voiceOverlayOpen, elevenLabsApiKey]);

+ // Listen for widget-to-MCP tool calls
+ useEffect(() => {
+ const handleWidgetToolCall = async (event: CustomEvent<{ toolName: string; toolArgs: Record<string, unknown> }>) => {
  const { toolName, toolArgs } = event.detail;
- console.log('📞 Chat received tool call from widget:', toolName, toolArgs);
-
- isProcessingToolCall = true;
+ console.log('🔧 Widget tool call received:', toolName, toolArgs);

  try {
- // Get current state directly from store to avoid stale closure
- const currentMessages = useStudioStore.getState().chatMessages;
- const currentProv = useStudioStore.getState().currentProvider;
+ setLoading(true);
+ const { jwtToken, mcpApiKey } = getAuthTokens();
+ const effectiveToken = jwtToken || useStudioStore.getState().oauthState?.currentToken;
+
+ // Track ops: Start tool call from widget interaction
+ const toolCallId = `call_${Date.now()}`;
+ opsTracker.startToolCall(toolCallId, toolName, toolArgs || {});
+ const toolStartTime = Date.now();
+
+ // Call the tool via API
+ const rawResult = await api.callTool(
+ toolName,
+ toolArgs || {},
+ effectiveToken,
+ mcpApiKey || undefined
+ );
+
+ console.log('✅ Widget tool call raw result:', rawResult);
+
+ // Parse the MCP result format: { content: [{ type: "text", text: "..." }] }
+ let parsedResult = rawResult;
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ const mcpResult = rawResult as any;
+ if (mcpResult?.content?.[0]?.text) {
+ try {
+ parsedResult = JSON.parse(mcpResult.content[0].text);
+ // Unwrap if response was wrapped by TransformInterceptor
+ if (parsedResult && typeof parsedResult === 'object' && 'success' in parsedResult && 'data' in parsedResult) {
+ parsedResult = (parsedResult as { success: boolean; data: unknown }).data;
+ }
+ } catch {
+ parsedResult = { content: mcpResult.content[0].text };
+ }
+ }

- // Directly send the tool call message without showing in input
- const toolCallMessage = `Use the ${toolName} tool with these arguments: ${JSON.stringify(toolArgs)}`;
+ console.log('✅ Widget tool call parsed result:', parsedResult);

- // Add user message
- const userMessage: ChatMessage = {
- role: 'user',
- content: toolCallMessage,
- };
- addChatMessage(userMessage);
+ // Track ops: Complete tool call with result
+ const resultNodeId = opsTracker.completeToolCall(toolCallId, toolName, parsedResult);

- // Call LLM
- setLoading(true);
- try {
- const { jwtToken, mcpApiKey } = getAuthTokens();
- const apiKey = localStorage.getItem(`${currentProv}_api_key`);
- const response = await api.chat({
- provider: currentProv,
- messages: [...currentMessages, userMessage],
- apiKey: apiKey || '',
- jwtToken: jwtToken || undefined,
- mcpApiKey: mcpApiKey || undefined,
- });
-
- // Handle tool calls (same as handleSend)
- if (response.toolCalls && response.toolResults) {
- // Attach results to tool calls for widget rendering
- const toolCallsWithResults = response.toolCalls.map((tc: any, i: any) => {
- const toolResult = response.toolResults[i];
- let parsedResult;
- if (toolResult.content) {
- try {
- parsedResult = JSON.parse(toolResult.content);
- } catch (e) {
- parsedResult = { raw: toolResult.content };
- }
- }
- return { ...tc, result: parsedResult };
- });
+ // Track ops: Widget render
+ const toolDef = tools.find(t => t.name === toolName);
+ const widgetUri = toolDef?.widget?.route || toolDef?.outputTemplate || toolDef?._meta?.['openai/outputTemplate'];
+ if (widgetUri && parsedResult && resultNodeId) {
+ opsTracker.trackWidget(widgetUri, toolName, parsedResult, resultNodeId);
+ }

- if (response.message) {
- response.message.toolCalls = toolCallsWithResults;
- addChatMessage(response.message);
- }
+ // Add assistant message with tool call info
+ const assistantMsg: ChatMessage = {
+ role: 'assistant',
+ content: ``,
+ toolCalls: [{
+ id: toolCallId,
+ name: toolName,
+ arguments: toolArgs || {},
+ result: parsedResult // Attach parsed result for widget rendering
+ }]
+ };
+ addChatMessage(assistantMsg);

- // Add tool results
- const toolResultMessages: ChatMessage[] = [];
- for (const result of response.toolResults) {
- addChatMessage(result);
- toolResultMessages.push(result);
+ // Add tool result message
+ const toolResultMsg: ChatMessage = {
+ role: 'tool',
+ content: JSON.stringify(parsedResult),
+ toolCallId: toolCallId
+ };
+ addChatMessage(toolResultMsg);
+
+ // Continue conversation to get LLM response about the tool result (like nitrochat)
+ const apiKey = localStorage.getItem(`${currentProvider}_api_key`);
+ if (apiKey && apiKey !== '••••••••') {
+ // Get current messages and add instruction to prevent tool chaining
+ const currentMessages = useStudioStore.getState().chatMessages;
+
+ // Add a system-style instruction (as user message) to prevent the LLM from calling more tools
+ // This is hidden from the user but guides the LLM to just summarize
+ const messagesForLLM: ChatMessage[] = [
+ ...currentMessages,
+ {
+ role: 'user',
+ content: `[INSTRUCTION: The user clicked on a widget item which called the "${toolName}" tool. Please summarize the result above in a helpful way. IMPORTANT: Do NOT call any additional tools - just provide a text summary of what was retrieved. The user only wanted to see details for this specific item.]`
  }
-
- // Continue conversation
- const messagesForContinuation = [
- ...currentMessages,
- userMessage,
- response.message!,
- ...toolResultMessages,
- ];
-
- // Call continueChatWithToolResults
- await continueChatWithToolResults(apiKey || '', messagesForContinuation);
- } else if (response.message) {
- addChatMessage(response.message);
- }
-
- setLoading(false);
- } catch (error) {
- console.error('Tool call failed:', error);
- setLoading(false);
+ ];
+
+ await continueChatWithToolResults(apiKey, messagesForLLM, true);
  }
+
+ } catch (error) {
+ console.error('❌ Widget tool call failed:', error);
+ // Track ops: Tool call error
+ opsTracker.errorLLMCall();
+ addChatMessage({
+ role: 'assistant',
+ content: `Failed to execute tool ${toolName}: ${error instanceof Error ? error.message : String(error)}`
+ });
  } finally {
- // Reset flag after a short delay to allow next call
- setTimeout(() => {
- isProcessingToolCall = false;
- }, 1000);
+ setLoading(false);
  }
  };

- window.addEventListener('widget-tool-call', handleToolCall);
- return () => window.removeEventListener('widget-tool-call', handleToolCall);
- }, []); // Empty dependency array - only register once
+ window.addEventListener('widget-tool-call', handleWidgetToolCall as EventListener);
+ return () => {
+ window.removeEventListener('widget-tool-call', handleWidgetToolCall as EventListener);
+ };
+ }, [addChatMessage, currentProvider]);
+
+ // Convert markdown content to voice-friendly, conversational text
+ // Optimized for minimal TTS token usage
+ const convertToVoiceFriendlyText = (text: string): string => {
+ if (!text) return '';
+
+ let result = text;
+
+ // Remove code blocks entirely (not suitable for voice)
+ result = result.replace(/```[\s\S]*?```/g, 'I\'ve included code in the chat.');
+ result = result.replace(/`[^`]+`/g, '');
+
+ // Remove tables
+ result = result.replace(/\|[\s\S]*?\|/g, '');
+ if (text.includes('|')) {
+ result = result + ' Check the chat for table details.';
+ }
+
+ // Remove markdown bold/italic
+ result = result.replace(/\*\*([^*]+)\*\*/g, '$1');
+ result = result.replace(/\*([^*]+)\*/g, '$1');
+ result = result.replace(/__([^_]+)__/g, '$1');
+ result = result.replace(/_([^_]+)_/g, '$1');
+
+ // Remove markdown headers
+ result = result.replace(/^#{1,6}\s+/gm, '');
+
+ // Remove markdown links, keep text
+ result = result.replace(/\[([^\]]+)\]\([^)]+\)/g, '$1');
+
+ // Handle bullet lists - summarize aggressively
+ const bulletMatches = result.match(/^[-*]\s+.+$/gm);
+ if (bulletMatches && bulletMatches.length > 3) {
+ // Get first 2 clean items
+ const first2 = bulletMatches.slice(0, 2).map(item =>
+ item.replace(/^[-*]\s+/, '').replace(/\*\*/g, '').replace(/\s*\([A-Z]{2,4}\)\s*/g, '').trim()
+ );
+ const count = bulletMatches.length;
+
+ // Replace entire list with summary
+ const listPattern = /((?:^[-*]\s+.+$\n?)+)/gm;
+ result = result.replace(listPattern, `I found ${count} items, including ${first2[0]} and ${first2[1]}. `);
+ } else if (bulletMatches) {
+ // For short lists, just mention count and first item
+ const first = bulletMatches[0].replace(/^[-*]\s+/, '').replace(/\*\*/g, '').trim();
+ result = result.replace(/((?:^[-*]\s+.+$\n?)+)/gm, `${bulletMatches.length} options: ${first} and others. `);
+ }
+
+ // Remove numbered lists, summarize
+ const numberedMatches = result.match(/^\d+\.\s+.+$/gm);
+ if (numberedMatches && numberedMatches.length > 3) {
+ const first = numberedMatches[0].replace(/^\d+\.\s+/, '').trim();
+ result = result.replace(/((?:^\d+\.\s+.+$\n?)+)/gm, `${numberedMatches.length} steps, starting with: ${first}. `);
+ } else {
+ result = result.replace(/^\d+\.\s+/gm, '');
+ }
+
+ // Remove parenthetical codes like (LON), (STN) for voice
+ result = result.replace(/\s*\([A-Z]{2,4}\)\s*/g, ' ');
+
+ // Clean up multiple newlines and spaces
+ result = result.replace(/\n{2,}/g, '. ');
+ result = result.replace(/\n/g, ', ');
+ result = result.replace(/\s{2,}/g, ' ');
+
+ // Hard limit: 80 words max for voice response
+ const words = result.split(/\s+/).filter(w => w.length > 0);
+ if (words.length > 80) {
+ result = words.slice(0, 80).join(' ') + '. Would you like more details?';
+ }
+
+ // Clean up any remaining artifacts
+ result = result.replace(/,\s*,/g, ',');
+ result = result.replace(/\.\s*\./g, '.');
+ result = result.replace(/,\s*\./g, '.');
+ result = result.trim();
+
+ return result;
+ };
+
+ // Note: toggleRecording removed - VoiceOrbOverlay handles speech recognition
+
+ const playTextToSpeech = async (text: string) => {
+ console.log('🔊 playTextToSpeech called with:', text?.substring(0, 50));
+ console.log('🎤 Using voiceId:', voiceId);
+ console.log('🎤 Using voiceModel:', voiceModel);
+
+ if (!elevenLabsApiKey) {
+ console.error('❌ No ElevenLabs API key configured');
+ return;
+ }
+
+ try {
+ setLlmState('speaking');
+
+ const response = await fetch(`https://api.elevenlabs.io/v1/text-to-speech/${voiceId}/stream`, {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ 'xi-api-key': elevenLabsApiKey,
+ },
+ body: JSON.stringify({
+ text,
+ model_id: voiceModel,
+ voice_settings: {
+ stability: 0.5,
+ similarity_boost: 0.75,
+ },
+ }),
+ });
+
+ console.log('📡 ElevenLabs response status:', response.status);
+
+ if (!response.ok) {
+ const errorText = await response.text();
+ console.error('❌ ElevenLabs API error:', errorText);
+ throw new Error(`TTS failed: ${response.status} - ${errorText}`);
+ }
+
+ const blob = await response.blob();
+ console.log('🎵 Audio blob size:', blob.size, 'bytes');
+
+ const url = URL.createObjectURL(blob);
+ const audio = new Audio(url);
+
+ audio.onended = () => {
+ console.log('🔊 Audio playback ended');
+ setLlmState('listening'); // Resume listening after speaking
+ URL.revokeObjectURL(url);
+ };
+
+ audio.onerror = (e) => {
+ console.error('❌ Audio playback error:', e);
+ setLlmState('idle');
+ URL.revokeObjectURL(url);
+ };
+
+ audioRef.current = audio;
+
+ try {
+ await audio.play();
+ console.log('▶️ Audio playing');
+ } catch (playError) {
+ console.error('❌ Audio play failed (autoplay policy?):', playError);
+ setLlmState('idle');
+ }
+ } catch (error) {
+ console.error('❌ TTS Error:', error);
+ setLlmState('idle');
+ }
+ };
+
+ const stopSpeaking = () => {
+ if (audioRef.current) {
+ audioRef.current.pause();
+ audioRef.current = null;
+ }
+ setLlmState('idle');
+ };

  const loadTools = async () => {
  try {
@@ -248,36 +660,65 @@
  initialToolExecuted.current = true;
  console.log('🚀 Auto-executing initial tool:', initialTool.name);

- // Initial message
- const autoMsg: ChatMessage = {
- role: 'user',
- content: `(Auto) Executing initial tool: ${initialTool.name}`,
- };
- addChatMessage(autoMsg);
  setLoading(true);

+ // Track ops: Start a new turn for initial tool
+ opsTracker.startTurn(`[Initial] ${initialTool.name}`);
+
  try {
  const { jwtToken, mcpApiKey } = getAuthTokens();
  const effectiveToken = jwtToken || useStudioStore.getState().oauthState?.currentToken;

+ // Track ops: Start tool call
+ const toolCallId = `call_${Date.now()}_init`;
+ opsTracker.startToolCall(toolCallId, initialTool.name, {});
+
  // Call the tool
- const result = await api.callTool(
+ const rawResult = await api.callTool(
  initialTool.name,
  {},
  effectiveToken,
  mcpApiKey || undefined
  );

- // Add assistant message with tool call info
- const toolCallId = `call_${Date.now()}`;
+ console.log('✅ Initial tool raw result:', rawResult);
+
+ // Parse the MCP result format: { content: [{ type: "text", text: "..." }] }
+ let parsedResult = rawResult;
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ const mcpResult = rawResult as any;
+ if (mcpResult?.content?.[0]?.text) {
+ try {
+ parsedResult = JSON.parse(mcpResult.content[0].text);
+ // Unwrap if response was wrapped by TransformInterceptor
+ if (parsedResult && typeof parsedResult === 'object' && 'success' in parsedResult && 'data' in parsedResult) {
+ parsedResult = (parsedResult as { success: boolean; data: unknown }).data;
+ }
+ } catch {
+ parsedResult = { content: mcpResult.content[0].text };
+ }
+ }
+
+ console.log('✅ Initial tool parsed result:', parsedResult);
+
+ // Track ops: Complete tool call with result
+ const resultNodeId = opsTracker.completeToolCall(toolCallId, initialTool.name, parsedResult);
+
+ // Track ops: Widget render
+ const widgetUri = initialTool?.widget?.route || initialTool?.outputTemplate || initialTool?._meta?.['openai/outputTemplate'];
+ if (widgetUri && parsedResult && resultNodeId) {
+ opsTracker.trackWidget(widgetUri, initialTool.name, parsedResult, resultNodeId);
+ }
+
+ // Add assistant message with tool call info (widget will render from this)
  const assistantMsg: ChatMessage = {
  role: 'assistant',
- content: `Invoking ${initialTool.name}...`,
+ content: '',
  toolCalls: [{
  id: toolCallId,
  name: initialTool.name,
  arguments: {},
- result // Attach result here for widget rendering
+ result: parsedResult // Attach parsed result for widget rendering
  }]
  };
  addChatMessage(assistantMsg);
@@ -285,13 +726,18 @@
  // Add tool result message
  const toolResultMsg: ChatMessage = {
  role: 'tool',
- content: JSON.stringify(result),
+ content: JSON.stringify(parsedResult),
  toolCallId: toolCallId
  };
  addChatMessage(toolResultMsg);

+ // Note: Like nitrochat, we don't continue chat automatically here
+ // We just show the result (widget) as the starting state
+
  } catch (error) {
- console.error('Initial tool execution failed:', error);
+ console.error('Initial tool execution failed:', error);
+ // Track ops: Error
+ opsTracker.errorLLMCall();
  addChatMessage({
  role: 'assistant',
  content: `Failed to execute initial tool ${initialTool.name}: ${error instanceof Error ? error.message : String(error)}`
@@ -331,11 +777,15 @@
  // Add the prompt result as an assistant message
  if (result.messages && result.messages.length > 0) {
  // Combine all prompt messages into one assistant message
- const combinedContent = result.messages
- .map((msg: any) => {
+ interface PromptMessageResult {
+ role: string;
+ content: string | { text?: string } | unknown;
+ }
+ const combinedContent = (result.messages as PromptMessageResult[])
+ .map((msg) => {
  const content = typeof msg.content === 'string'
  ? msg.content
- : msg.content?.text || JSON.stringify(msg.content);
+ : (msg.content as { text?: string })?.text || JSON.stringify(msg.content);
  return `[${msg.role.toUpperCase()}]\n${content}`;
  })
  .join('\n\n');
@@ -382,8 +832,11 @@
  reader.readAsDataURL(file);
  };

- const handleSend = async () => {
- if (!inputValue.trim() && !currentFile) return;
+ const handleSend = async (directMessage?: string) => {
+ // Use direct message if provided (from voice mode), otherwise use inputValue
+ const messageText = directMessage || inputValue;
+
+ if (!messageText.trim() && !currentFile) return;

  const apiKey = localStorage.getItem(`${currentProvider}_api_key`);
  if (!apiKey) {
@@ -394,7 +847,7 @@

  const userMessage: ChatMessage = {
  role: 'user',
- content: inputValue,
+ content: messageText,
  };

  if (currentFile) {
@@ -406,56 +859,67 @@
  setCurrentFile(null);
  setLoading(true);

+ // Track ops: Start a new turn
+ opsTracker.startTurn(messageText, !!currentFile);
+
  try {
  const messagesToSend = [...chatMessages, userMessage];

- // Clean messages to ensure they're serializable
- const cleanedMessages = messagesToSend.map(msg => {
- const cleaned: any = {
- role: msg.role,
- content: msg.content || '',
- };
-
- if (msg.toolCalls && msg.toolCalls.length > 0) {
- cleaned.toolCalls = msg.toolCalls;
- }
-
- if (msg.toolCallId) {
- cleaned.toolCallId = msg.toolCallId;
- }
-
- // Skip image property for now (not supported by OpenAI chat completions)
- if (msg.file) {
- cleaned.file = msg.file;
- }
-
- return cleaned;
- });
+ // Optimize messages to reduce token count
+ const cleanedMessages = optimizeMessagesForLLM(messagesToSend);

  // Get fresh auth tokens from store
  const { jwtToken, mcpApiKey } = getAuthTokens();

- console.log('Sending messages to API:', cleanedMessages);
+ // Add language instruction for voice mode (if non-English)
+ let messagesForApi = cleanedMessages;
+ if (voiceModeEnabled && outputLanguage !== 'en') {
+ const langNames: Record<string, string> = {
+ 'hi': 'Hindi', 'es': 'Spanish', 'fr': 'French', 'de': 'German',
+ 'ja': 'Japanese', 'ko': 'Korean', 'zh': 'Chinese', 'pt': 'Portuguese', 'it': 'Italian'
+ };
+ const langName = langNames[outputLanguage] || outputLanguage;
+ const langInstruction = {
+ role: 'system',
+ content: `IMPORTANT: The user is using voice mode with ${langName} language. You MUST respond in ${langName}. Keep responses concise for voice output.`
+ };
+ messagesForApi = [langInstruction, ...cleanedMessages];
+ }
+
+ console.log('Sending messages to API:', messagesForApi);
  console.log('Auth tokens:', { hasJwtToken: !!jwtToken, hasMcpApiKey: !!mcpApiKey });
  console.log('Original messages:', messagesToSend);
- console.log('Cleaned messages JSON:', JSON.stringify(cleanedMessages));
+ console.log('Voice mode:', voiceModeEnabled, 'Output language:', outputLanguage);
+
+ // Track ops: LLM call start
+ const llmModel = currentProvider === 'gemini' ? 'gemini-1.5-pro' : 'gpt-4o';
+ opsTracker.startLLMCall(currentProvider as LLMProvider, llmModel);

  const response = await api.chat({
  provider: currentProvider,
- messages: cleanedMessages,
+ messages: messagesForApi,
  apiKey, // LLM API key (OpenAI/Gemini)
  jwtToken: jwtToken || undefined,
  mcpApiKey: mcpApiKey || undefined, // MCP server API key
  });

+ // Track ops: LLM call complete (estimate tokens based on content length)
+ const inputTokenEstimate = Math.ceil(JSON.stringify(messagesForApi).length / 4);
+ const outputTokenEstimate = Math.ceil((response.message?.content?.length || 0) / 4);
+ opsTracker.completeLLMCall(inputTokenEstimate, outputTokenEstimate);
+
  // Handle tool calls FIRST (before adding the message)
  if (response.toolCalls && response.toolResults) {
- // Attach results to tool calls for widget rendering
- const toolCallsWithResults = response.toolCalls.map((tc, i) => {
+ // Track ops: Process each tool call
+ const toolCallsWithResults = response.toolCalls.map((tc: ToolCall, i: number) => {
  const toolResult = response.toolResults[i];

+ // Track ops: Tool call start
+ opsTracker.startToolCall(tc.id, tc.name, tc.arguments as Record<string, unknown>);
+
  // Parse the result content
  let parsedResult;
+ let hasError = false;
  if (toolResult.content) {
  try {
  parsedResult = JSON.parse(toolResult.content);
@@ -469,6 +933,16 @@
  }
  }

+ // Track ops: Tool call complete with result
+ const resultNodeId = opsTracker.completeToolCall(tc.id, tc.name, parsedResult, hasError ? 'Error processing result' : undefined);
+
+ // Track ops: Widget render if tool has a widget
+ const toolDef = tools.find(t => t.name === tc.name);
+ const widgetUri = toolDef?.widget?.route || toolDef?.outputTemplate || toolDef?._meta?.['openai/outputTemplate'];
+ if (widgetUri && parsedResult && resultNodeId) {
+ opsTracker.trackWidget(widgetUri, tc.name, parsedResult, resultNodeId);
+ }
+
  return {
  ...tc,
  result: parsedResult,
@@ -521,6 +995,12 @@
  // No tool calls, just add the message
  if (response.message) {
  addChatMessage(response.message);
+
+ // Track ops: Final response
+ opsTracker.trackResponse(
+ response.message.content || '',
+ outputTokenEstimate
+ );
  }
  }

@@ -528,6 +1008,10 @@
  setLoading(false);
  } catch (error) {
  console.error('Chat error:', error);
+
+ // Track ops: LLM call error
+ opsTracker.errorLLMCall();
+
  addChatMessage({
  role: 'assistant',
  content: 'Sorry, I encountered an error. Please try again.',
@@ -536,7 +1020,7 @@
  }
  };

- const continueChatWithToolResults = async (apiKey: string, messages?: ChatMessage[]) => {
+ const continueChatWithToolResults = async (apiKey: string, messages?: ChatMessage[], isFromWidget: boolean = false) => {
  try {
  // Use provided messages or fall back to store (for recursive calls)
  const messagesToUse = messages || chatMessages;
@@ -544,26 +1028,15 @@ export default function ChatPage() {
544
1028
  // Get fresh auth tokens from store (token may have been updated by login)
545
1029
  const { jwtToken, mcpApiKey } = getAuthTokens();
546
1030
 
547
- // Clean messages before sending
548
- const cleanedMessages = messagesToUse.map(msg => {
549
- const cleaned: any = {
550
- role: msg.role,
551
- content: msg.content || '',
552
- };
553
-
554
- if (msg.toolCalls && msg.toolCalls.length > 0) {
555
- cleaned.toolCalls = msg.toolCalls;
556
- }
557
-
558
- if (msg.toolCallId) {
559
- cleaned.toolCallId = msg.toolCallId;
560
- }
1031
+ // Optimize messages to reduce token count
1032
+ const cleanedMessages = optimizeMessagesForLLM(messagesToUse);
561
1033
 
562
- return cleaned;
563
- });
1034
+ console.log('Continue with optimized messages:', cleanedMessages.length, 'messages');
564
1035
 
565
- console.log('Continue with cleaned messages:', JSON.stringify(cleanedMessages));
566
- console.log('Continue auth tokens:', { hasJwtToken: !!jwtToken, hasMcpApiKey: !!mcpApiKey });
1036
+ // Track ops: LLM call start for continuation
1037
+ const llmModel = currentProvider === 'gemini' ? 'gemini-1.5-pro' : 'gpt-4o';
1038
+ opsTracker.startLLMCall(currentProvider as LLMProvider, llmModel);
1039
+ const llmStartTime = Date.now();
567
1040
 
568
1041
  const response = await api.chat({
569
1042
  provider: currentProvider,
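Note: optimizeMessagesForLLM replaces the inline cleaning loop removed above, but its implementation is not part of this hunk. A plausible minimal version, keeping exactly the fields the removed code preserved (role, content, toolCalls, toolCallId), could look like this; the real helper may trim or summarize further to reduce tokens.

// Hypothetical sketch - not the package's optimizeMessagesForLLM.
// Keeps only the fields the removed inline cleaner preserved.
interface ChatMessageLike {
  role: string;
  content?: string;
  toolCalls?: unknown[];
  toolCallId?: string;
}

function optimizeMessagesSketch(messages: ChatMessageLike[]): ChatMessageLike[] {
  return messages.map((msg) => {
    const cleaned: ChatMessageLike = { role: msg.role, content: msg.content || '' };
    if (msg.toolCalls && msg.toolCalls.length > 0) cleaned.toolCalls = msg.toolCalls;
    if (msg.toolCallId) cleaned.toolCallId = msg.toolCallId;
    return cleaned;
  });
}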
@@ -573,12 +1046,58 @@ export default function ChatPage() {
573
1046
  mcpApiKey: mcpApiKey || undefined, // MCP server API key
574
1047
  });
575
1048
 
576
- if (response.message) {
577
- addChatMessage(response.message);
578
- }
1049
+ // Track ops: LLM call complete with token estimates
1050
+ const inputTokenEstimate = Math.ceil(JSON.stringify(cleanedMessages).length / 4);
1051
+ const outputTokenEstimate = Math.ceil((response.message?.content?.length || 0) / 4);
1052
+ opsTracker.completeLLMCall(inputTokenEstimate, outputTokenEstimate);
579
1053
 
580
- // Recursive tool calls
1054
+ // Handle tool calls - attach results before adding message (same as handleSend)
581
1055
  if (response.toolCalls && response.toolResults) {
1056
+ // Track ops: Process each tool call
1057
+ const toolCallsWithResults = response.toolCalls.map((tc: ToolCall, i: number) => {
1058
+ const toolResult = response.toolResults[i];
1059
+
1060
+ // Track ops: Tool call start
1061
+ opsTracker.startToolCall(tc.id, tc.name, tc.arguments as Record<string, unknown>);
1062
+
1063
+ // Parse the result content
1064
+ let parsedResult;
1065
+ if (toolResult.content) {
1066
+ try {
1067
+ parsedResult = JSON.parse(toolResult.content);
1068
+
1069
+ // Unwrap if response was wrapped by TransformInterceptor
1070
+ if (parsedResult.success !== undefined && parsedResult.data !== undefined) {
1071
+ parsedResult = parsedResult.data;
1072
+ }
1073
+ } catch {
1074
+ parsedResult = { content: toolResult.content };
1075
+ }
1076
+ }
1077
+
1078
+ // Track ops: Tool call complete with result
1079
+ const resultNodeId = opsTracker.completeToolCall(tc.id, tc.name, parsedResult);
1080
+
1081
+ // Track ops: Widget render if tool has a widget
1082
+ const toolDef = tools.find(t => t.name === tc.name);
1083
+ const widgetUri = toolDef?.widget?.route || toolDef?.outputTemplate || toolDef?._meta?.['openai/outputTemplate'];
1084
+ if (widgetUri && parsedResult && resultNodeId) {
1085
+ opsTracker.trackWidget(widgetUri, tc.name, parsedResult, resultNodeId);
1086
+ }
1087
+
1088
+ return {
1089
+ ...tc,
1090
+ result: parsedResult,
1091
+ };
1092
+ });
1093
+
1094
+ // Add assistant message with tool calls (with results attached)
1095
+ if (response.message) {
1096
+ response.message.toolCalls = toolCallsWithResults;
1097
+ addChatMessage(response.message);
1098
+ }
1099
+
1100
+ // Add tool result messages
582
1101
  const newToolResults: ChatMessage[] = [];
583
1102
  for (const result of response.toolResults) {
584
1103
  addChatMessage(result);
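Note: the token counts recorded by completeLLMCall above are estimates only — the code divides the serialized character length by four, a rough rule of thumb for GPT-style tokenizers on English text, rather than invoking a real tokenizer. The same arithmetic as a standalone helper:

// Rough estimate used above (~4 characters per token on average).
// Exact counts would require the provider's own tokenizer.
function estimateTokens(payload: unknown): number {
  const text = typeof payload === 'string' ? payload : JSON.stringify(payload ?? '');
  return Math.ceil(text.length / 4);
}

// Example: a 1,000-character message history is recorded as roughly 250 tokens.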
@@ -591,16 +1110,28 @@ export default function ChatPage() {
591
1110
  response.message!,
592
1111
  ...newToolResults,
593
1112
  ];
594
- await continueChatWithToolResults(apiKey, nextMessages);
1113
+ await continueChatWithToolResults(apiKey, nextMessages, false);
1114
+ } else {
1115
+ // No tool calls, just add the message
1116
+ if (response.message) {
1117
+ addChatMessage(response.message);
1118
+
1119
+ // Track ops: Final response
1120
+ opsTracker.trackResponse(
1121
+ response.message.content || '',
1122
+ outputTokenEstimate
1123
+ );
1124
+ }
595
1125
  }
596
1126
  } catch (error) {
597
1127
  console.error('Continuation error:', error);
1128
+ // Track ops: Error
1129
+ opsTracker.errorLLMCall();
598
1130
  }
599
1131
  };
600
1132
 
601
1133
  const saveApiKey = (provider: 'openai' | 'gemini') => {
602
- const input = document.getElementById(`${provider}-api-key`) as HTMLInputElement;
603
- const key = input?.value.trim();
1134
+ const key = provider === 'openai' ? openaiApiKeyInput.trim() : geminiApiKeyInput.trim();
604
1135
 
605
1136
  if (!key || key === '••••••••') {
606
1137
  alert('Please enter a valid API key');
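Note: saveApiKey now reads the key from controlled React state (openaiApiKeyInput / geminiApiKeyInput) instead of querying the DOM, which keeps the displayed value and React's render in sync. The persistence logic is simple enough to isolate; a sketch, assuming browser localStorage is available:

// Sketch of the save-and-mask flow used above; browser-only (relies on localStorage).
const KEY_MASK = '••••••••';

function saveProviderKey(provider: 'openai' | 'gemini', rawInput: string): string | null {
  const key = rawInput.trim();
  if (!key || key === KEY_MASK) return null;       // reject empty or already-masked input
  localStorage.setItem(`${provider}_api_key`, key);
  return KEY_MASK;                                  // caller displays the mask after saving
}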
@@ -608,207 +1139,512 @@ export default function ChatPage() {
608
1139
  }
609
1140
 
610
1141
  localStorage.setItem(`${provider}_api_key`, key);
611
- input.value = '••••••••';
1142
+ // Show masked value after save
1143
+ if (provider === 'openai') {
1144
+ setOpenaiApiKeyInput('••••••••');
1145
+ } else {
1146
+ setGeminiApiKeyInput('••••••••');
1147
+ }
612
1148
  alert(`${provider === 'openai' ? 'OpenAI' : 'Gemini'} API key saved`);
613
1149
  };
614
1150
 
615
1151
  return (
616
1152
  <div className="fixed inset-0 flex flex-col" style={{ left: 'var(--sidebar-width, 15rem)', backgroundColor: '#0a0a0a' }}>
617
- {/* Sticky Header */}
618
- <div className="sticky top-0 z-10 border-b border-border/50 px-3 sm:px-6 py-3 flex flex-col sm:flex-row items-start sm:items-center justify-between bg-card/80 backdrop-blur-md shadow-sm gap-3 sm:gap-0">
1153
+ {/* Minimal Professional Header */}
1154
+ <div className="sticky top-0 z-10 border-b border-border/50 px-3 sm:px-6 py-4 flex flex-col sm:flex-row items-start sm:items-center justify-between bg-card/50 backdrop-blur-sm gap-3 sm:gap-0">
619
1155
  <div className="flex items-center gap-3">
620
- <div className="w-8 h-8 rounded-lg bg-gradient-to-br from-primary to-amber-500 flex items-center justify-center shadow-md">
621
- <Bot className="w-5 h-5 text-white" strokeWidth={2.5} />
622
- </div>
623
- <div>
624
- <h1 className="text-lg font-bold text-foreground">AI Chat</h1>
625
- </div>
1156
+ <h1 className="text-lg font-semibold text-foreground">AI Chat</h1>
1157
+
1158
+ {/* Professional Voice Banner - shows when voice mode active */}
1159
+ {voiceModeEnabled && (
1160
+ <button
1161
+ onClick={() => setVoiceOverlayOpen(true)}
1162
+ className="flex items-center gap-3 bg-zinc-800/90 rounded-full px-4 py-2 hover:bg-zinc-700/90 transition-colors"
1163
+ >
1164
+ {/* Metallic Orb */}
1165
+ <div
1166
+ className="w-7 h-7 rounded-full flex-shrink-0"
1167
+ style={{
1168
+ background: 'conic-gradient(from 0deg, #9ca3af, #374151, #9ca3af, #374151, #9ca3af)',
1169
+ boxShadow: 'inset 0 2px 4px rgba(255,255,255,0.1), 0 2px 8px rgba(0,0,0,0.3)'
1170
+ }}
1171
+ />
1172
+ {/* State Text */}
1173
+ <span className="text-sm text-zinc-300">
1174
+ {llmState === 'listening' && 'Listening'}
1175
+ {llmState === 'thinking' && 'Processing'}
1176
+ {llmState === 'speaking' && 'Speaking'}
1177
+ {llmState === 'idle' && 'Ready'}
1178
+ </span>
1179
+ </button>
1180
+ )}
626
1181
  </div>
627
1182
 
628
1183
  <div className="flex items-center gap-2 w-full sm:w-auto">
629
- <select
630
- value={currentProvider}
631
- onChange={(e) => setCurrentProvider(e.target.value as 'openai' | 'gemini')}
632
- className="input text-sm px-3 py-1.5 w-full sm:w-28 flex-1 sm:flex-none"
1184
+ {/* Voice Output Toggle */}
1185
+ {elevenLabsApiKey && (
1186
+ <button
1187
+ onClick={() => {
1188
+ if (llmState === 'speaking') stopSpeaking();
1189
+ setVoiceModeEnabled(!voiceModeEnabled);
1190
+ }}
1191
+ className={`h-8 w-8 rounded-lg flex items-center justify-center transition-all flex-shrink-0 ${voiceModeEnabled
1192
+ ? 'bg-primary/20 text-primary ring-1 ring-primary/50'
1193
+ : 'bg-muted/50 text-muted-foreground hover:text-foreground'
1194
+ }`}
1195
+ title={voiceModeEnabled ? "Disable Voice Output" : "Enable Voice Output"}
1196
+ >
1197
+ {llmState === 'speaking' ? <SpeakerWaveIcon className="h-4 w-4 animate-pulse" /> : <MicrophoneIcon className="h-4 w-4" />}
1198
+ </button>
1199
+ )}
1200
+
1201
+
1202
+ {/* Ops View Toggle */}
1203
+ <button
1204
+ onClick={toggleOpsView}
1205
+ className={`h-8 w-8 rounded-lg flex items-center justify-center transition-all flex-shrink-0 ${isOpsViewOpen
1206
+ ? 'bg-indigo-500/20 text-indigo-400 ring-1 ring-indigo-500/40'
1207
+ : 'bg-zinc-800/80 text-zinc-500 hover:bg-zinc-700/80 hover:text-zinc-300'
1208
+ }`}
1209
+ title="Operations"
633
1210
  >
634
- <option value="gemini">Gemini</option>
635
- <option value="openai">OpenAI</option>
636
- </select>
1211
+ <svg className="h-4 w-4" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="1.5">
1212
+ <path strokeLinecap="round" strokeLinejoin="round" d="M3.75 6A2.25 2.25 0 016 3.75h2.25A2.25 2.25 0 0110.5 6v2.25a2.25 2.25 0 01-2.25 2.25H6a2.25 2.25 0 01-2.25-2.25V6zM3.75 15.75A2.25 2.25 0 016 13.5h2.25a2.25 2.25 0 012.25 2.25V18a2.25 2.25 0 01-2.25 2.25H6A2.25 2.25 0 013.75 18v-2.25zM13.5 6a2.25 2.25 0 012.25-2.25H18A2.25 2.25 0 0120.25 6v2.25A2.25 2.25 0 0118 10.5h-2.25a2.25 2.25 0 01-2.25-2.25V6zM13.5 15.75a2.25 2.25 0 012.25-2.25H18a2.25 2.25 0 012.25 2.25V18A2.25 2.25 0 0118 20.25h-2.25A2.25 2.25 0 0113.5 18v-2.25z" />
1213
+ </svg>
1214
+ </button>
1215
+
637
1216
  <button
638
1217
  onClick={() => setShowSettings(!showSettings)}
639
- className={`w-8 h-8 rounded-lg flex items-center justify-center transition-all flex-shrink-0 ${showSettings
1218
+ className={`h-8 w-8 rounded-lg flex items-center justify-center transition-all flex-shrink-0 ${showSettings
640
1219
  ? 'bg-primary/10 text-primary ring-1 ring-primary/30'
641
1220
  : 'bg-muted/50 text-muted-foreground hover:bg-muted hover:text-foreground'
642
1221
  }`}
643
1222
  title="Settings"
644
1223
  >
645
- <Settings className="w-4 h-4" />
1224
+ <Cog6ToothIcon className="h-4 w-4" />
646
1225
  </button>
647
1226
  <button
648
- onClick={clearChat}
649
- className="w-8 h-8 rounded-lg flex items-center justify-center bg-muted/50 text-muted-foreground hover:bg-muted hover:text-foreground transition-all flex-shrink-0"
1227
+ onClick={() => {
1228
+ clearChat();
1229
+ clearOpsSessionStore();
1230
+ }}
1231
+ className="h-8 w-8 rounded-lg flex items-center justify-center bg-muted/50 text-muted-foreground hover:bg-muted hover:text-foreground transition-all flex-shrink-0"
650
1232
  title="Clear chat"
651
1233
  >
652
- <Trash2 className="w-4 h-4" />
1234
+ <TrashIcon className="h-4 w-4" />
653
1235
  </button>
654
1236
  </div>
655
1237
  </div>
656
1238
 
657
- {/* Enhanced Settings Panel */}
1239
+ {/* Enhanced Settings Side Drawer - Animated from Left */}
658
1240
  {showSettings && (
659
- <div className="border-b border-border/50 px-3 sm:px-6 py-4 sm:py-5 bg-muted/20 backdrop-blur-md shadow-sm">
660
- <div className="max-w-4xl mx-auto">
661
- <div className="flex items-start justify-between mb-4">
662
- <div>
663
- <h3 className="text-sm font-semibold text-foreground flex items-center gap-2">
664
- <Settings className="w-4 h-4" />
665
- API Configuration
666
- </h3>
667
- <p className="text-xs text-muted-foreground mt-1">Configure your AI provider API keys to enable chat functionality</p>
1241
+ <div
1242
+ className="fixed inset-0 z-50 bg-black/50 backdrop-blur-sm transition-opacity"
1243
+ onClick={() => setShowSettings(false)}
1244
+ >
1245
+ <div
1246
+ className="absolute right-0 top-0 h-full w-[400px] bg-card/95 backdrop-blur-xl border-l border-border shadow-2xl animate-slide-in-right overflow-y-auto"
1247
+ onClick={(e) => e.stopPropagation()}
1248
+ >
1249
+ <div className="p-6">
1250
+ <div className="flex items-center justify-between mb-8">
1251
+ <div>
1252
+ <h2 className="text-xl font-bold bg-gradient-to-r from-primary to-secondary bg-clip-text text-transparent">Settings</h2>
1253
+ <p className="text-sm text-muted-foreground mt-1">Configure your workspace</p>
1254
+ </div>
1255
+ <button
1256
+ onClick={() => setShowSettings(false)}
1257
+ className="p-2 rounded-full hover:bg-muted/50 transition-colors"
1258
+ >
1259
+ <XMarkIcon className="w-5 h-5 text-muted-foreground" />
1260
+ </button>
668
1261
  </div>
669
- </div>
670
1262
 
671
- <div className="grid grid-cols-1 md:grid-cols-2 gap-4">
672
- {/* OpenAI Section */}
673
- <div className="card p-4">
674
- <div className="flex items-center justify-between mb-3">
675
- <label className="text-xs font-semibold text-foreground flex items-center gap-2">
676
- <div className="w-6 h-6 rounded bg-green-500/10 flex items-center justify-center">
677
- <span className="text-xs font-bold text-green-600">AI</span>
1263
+ <div className="space-y-8">
1264
+ {/* AI Provider Selection */}
1265
+ <section>
1266
+ <h3 className="text-sm font-semibold text-foreground uppercase tracking-wider mb-4 flex items-center gap-2">
1267
+ <SparklesIcon className="w-4 h-4 text-primary" />
1268
+ AI Model
1269
+ </h3>
1270
+ <div className="card p-1">
1271
+ <div className="grid grid-cols-2 p-1 gap-1 bg-muted/30 rounded-lg">
1272
+ <button
1273
+ onClick={() => setCurrentProvider('gemini')}
1274
+ className={`flex items-center justify-center gap-2 py-2.5 rounded-md text-sm font-medium transition-all ${currentProvider === 'gemini'
1275
+ ? 'bg-background shadow-sm text-foreground ring-1 ring-border'
1276
+ : 'text-muted-foreground hover:text-foreground'
1277
+ }`}
1278
+ >
1279
+ <div className="w-4 h-4 rounded-sm bg-blue-500/20 flex items-center justify-center">
1280
+ <span className="text-[10px] font-bold text-blue-600">G</span>
1281
+ </div>
1282
+ Gemini
1283
+ </button>
1284
+ <button
1285
+ onClick={() => setCurrentProvider('openai')}
1286
+ className={`flex items-center justify-center gap-2 py-2.5 rounded-md text-sm font-medium transition-all ${currentProvider === 'openai'
1287
+ ? 'bg-background shadow-sm text-foreground ring-1 ring-border'
1288
+ : 'text-muted-foreground hover:text-foreground'
1289
+ }`}
1290
+ >
1291
+ <div className="w-4 h-4 rounded-sm bg-green-500/20 flex items-center justify-center">
1292
+ <span className="text-[10px] font-bold text-green-600">AI</span>
1293
+ </div>
1294
+ OpenAI
1295
+ </button>
678
1296
  </div>
679
- OpenAI API Key
680
- </label>
681
- <a
682
- href="https://platform.openai.com/api-keys"
683
- target="_blank"
684
- rel="noopener noreferrer"
685
- className="text-xs text-primary hover:text-primary/80 flex items-center gap-1 transition-colors"
686
- >
687
- Get Key <ExternalLink className="w-3 h-3" />
688
- </a>
689
- </div>
690
- <div className="flex gap-2 mb-3">
691
- <input
692
- id="openai-api-key"
693
- type="password"
694
- className="input flex-1 text-sm py-2"
695
- placeholder="sk-proj-..."
696
- />
697
- <button onClick={() => saveApiKey('openai')} className="btn btn-primary text-xs px-4 py-2">
698
- <Save className="w-3 h-3 mr-1" />
699
- Save
700
- </button>
701
- </div>
702
- <div className="flex items-start gap-2 p-2 bg-blue-500/5 rounded-lg border border-blue-500/10">
703
- <Info className="w-3 h-3 text-blue-500 mt-0.5 flex-shrink-0" />
704
- <div className="text-xs text-muted-foreground">
705
- <p className="mb-1">
706
- <strong>How to get:</strong> Sign up at{' '}
707
- <a href="https://platform.openai.com/signup" target="_blank" rel="noopener noreferrer" className="text-primary hover:underline">
708
- OpenAI Platform
709
- </a>
710
- , navigate to API Keys, and create a new secret key.
1297
+ <p className="p-3 text-xs text-muted-foreground border-t border-border/50 mt-1">
1298
+ {currentProvider === 'gemini' ? 'Google Gemini Pro 1.5 - Great for general reasoning and large context.' : 'GPT-4o - Best in class reasoning and code generation.'}
711
1299
  </p>
712
- <a
713
- href="https://help.openai.com/en/articles/4936850-where-do-i-find-my-openai-api-key"
714
- target="_blank"
715
- rel="noopener noreferrer"
716
- className="text-primary hover:underline inline-flex items-center gap-1"
717
- >
718
- View Guide <ExternalLink className="w-2.5 h-2.5" />
719
- </a>
720
1300
  </div>
721
- </div>
722
- </div>
1301
+ </section>
1302
+
1303
+ <hr className="border-border/50" />
1304
+
1305
+ {/* API Keys Configuration */}
1306
+ <section>
1307
+ <h3 className="text-sm font-semibold text-foreground uppercase tracking-wider mb-4 flex items-center gap-2">
1308
+ <Cog6ToothIcon className="w-4 h-4 text-primary" />
1309
+ API Credentials
1310
+ </h3>
1311
+
1312
+ <div className="space-y-4">
1313
+ {/* OpenAI Section */}
1314
+ <div className="card p-4 card-hover">
1315
+ <div className="flex items-center justify-between mb-3">
1316
+ <label className="text-sm font-medium text-foreground flex items-center gap-2">
1317
+ <div className="w-6 h-6 rounded bg-green-500/10 flex items-center justify-center">
1318
+ <span className="text-xs font-bold text-green-600">AI</span>
1319
+ </div>
1320
+ OpenAI
1321
+ </label>
1322
+ <a
1323
+ href="https://platform.openai.com/api-keys"
1324
+ target="_blank"
1325
+ rel="noopener noreferrer"
1326
+ className="text-xs text-primary hover:text-primary/80 flex items-center gap-1"
1327
+ >
1328
+ Get Key <ArrowTopRightOnSquareIcon className="w-3 h-3" />
1329
+ </a>
1330
+ </div>
1331
+ <div className="flex gap-2">
1332
+ <input
1333
+ id="openai-api-key"
1334
+ type="password"
1335
+ className="input flex-1 text-sm bg-background/50"
1336
+ placeholder="sk-proj-..."
1337
+ value={openaiApiKeyInput}
1338
+ onChange={(e) => setOpenaiApiKeyInput(e.target.value)}
1339
+ />
1340
+ <button onClick={() => saveApiKey('openai')} className="btn btn-primary btn-sm px-4">
1341
+ Save
1342
+ </button>
1343
+ </div>
1344
+ </div>
723
1345
 
724
- {/* Gemini Section */}
725
- <div className="card p-4">
726
- <div className="flex items-center justify-between mb-3">
727
- <label className="text-xs font-semibold text-foreground flex items-center gap-2">
728
- <div className="w-6 h-6 rounded bg-blue-500/10 flex items-center justify-center">
729
- <span className="text-xs font-bold text-blue-600">G</span>
1346
+ {/* Gemini Section */}
1347
+ <div className="card p-4 card-hover">
1348
+ <div className="flex items-center justify-between mb-3">
1349
+ <label className="text-sm font-medium text-foreground flex items-center gap-2">
1350
+ <div className="w-6 h-6 rounded bg-blue-500/10 flex items-center justify-center">
1351
+ <span className="text-xs font-bold text-blue-600">G</span>
1352
+ </div>
1353
+ Gemini
1354
+ </label>
1355
+ <a
1356
+ href="https://aistudio.google.com/app/apikey"
1357
+ target="_blank"
1358
+ rel="noopener noreferrer"
1359
+ className="text-xs text-primary hover:text-primary/80 flex items-center gap-1"
1360
+ >
1361
+ Get Key <ArrowTopRightOnSquareIcon className="w-3 h-3" />
1362
+ </a>
1363
+ </div>
1364
+ <div className="flex gap-2">
1365
+ <input
1366
+ id="gemini-api-key"
1367
+ type="password"
1368
+ className="input flex-1 text-sm bg-background/50"
1369
+ placeholder="AIza..."
1370
+ value={geminiApiKeyInput}
1371
+ onChange={(e) => setGeminiApiKeyInput(e.target.value)}
1372
+ />
1373
+ <button onClick={() => saveApiKey('gemini')} className="btn btn-primary btn-sm px-4">
1374
+ Save
1375
+ </button>
1376
+ </div>
730
1377
  </div>
731
- Gemini API Key
732
- </label>
733
- <a
734
- href="https://aistudio.google.com/app/apikey"
735
- target="_blank"
736
- rel="noopener noreferrer"
737
- className="text-xs text-primary hover:text-primary/80 flex items-center gap-1 transition-colors"
738
- >
739
- Get Key <ExternalLink className="w-3 h-3" />
740
- </a>
741
- </div>
742
- <div className="flex gap-2 mb-3">
743
- <input
744
- id="gemini-api-key"
745
- type="password"
746
- className="input flex-1 text-sm py-2"
747
- placeholder="AIza..."
748
- />
749
- <button onClick={() => saveApiKey('gemini')} className="btn btn-primary text-xs px-4 py-2">
750
- <Save className="w-3 h-3 mr-1" />
751
- Save
752
- </button>
753
- </div>
754
- <div className="flex items-start gap-2 p-2 bg-blue-500/5 rounded-lg border border-blue-500/10">
755
- <Info className="w-3 h-3 text-blue-500 mt-0.5 flex-shrink-0" />
756
- <div className="text-xs text-muted-foreground">
757
- <p className="mb-1">
758
- <strong>How to get:</strong> Visit{' '}
759
- <a href="https://aistudio.google.com" target="_blank" rel="noopener noreferrer" className="text-primary hover:underline">
760
- Google AI Studio
761
- </a>
762
- , sign in with your Google account, and click "Get API key".
763
- </p>
764
- <a
765
- href="https://ai.google.dev/gemini-api/docs/api-key"
766
- target="_blank"
767
- rel="noopener noreferrer"
768
- className="text-primary hover:underline inline-flex items-center gap-1"
769
- >
770
- View Guide <ExternalLink className="w-2.5 h-2.5" />
771
- </a>
772
1378
  </div>
773
- </div>
774
- </div>
775
- </div>
1379
+ </section>
1380
+
1381
+ <hr className="border-border/50" />
1382
+
1383
+ {/* Voice Configuration - Inline (Matches Global Settings) */}
1384
+ <section className="space-y-4 pt-4 border-t border-border">
1385
+ <div className="flex items-center justify-between">
1386
+ <label className="text-xs font-bold text-muted-foreground uppercase tracking-wider flex items-center gap-2">
1387
+ <MicrophoneIcon className="w-3 h-3" /> Voice Integration
1388
+ </label>
1389
+ {elevenLabsApiKey && <span className="text-[10px] bg-purple-500/10 text-purple-600 px-2 py-0.5 rounded-full font-medium border border-purple-500/20">Enabled</span>}
1390
+ </div>
1391
+
1392
+ <div className="bg-muted/10 rounded-xl border border-border p-4 space-y-4">
1393
+ {/* API Key Input */}
1394
+ <div>
1395
+ <label className="block text-xs font-medium text-foreground mb-1.5 flex items-center justify-between">
1396
+ <span>ElevenLabs API Key</span>
1397
+ <a
1398
+ href="https://elevenlabs.io/api"
1399
+ target="_blank"
1400
+ rel="noopener noreferrer"
1401
+ className="text-[10px] text-primary hover:underline flex items-center gap-1"
1402
+ >
1403
+ Get Key <ArrowTopRightOnSquareIcon className="w-2.5 h-2.5" />
1404
+ </a>
1405
+ </label>
1406
+ <div className="relative">
1407
+ <input
1408
+ type="password"
1409
+ value={elevenLabsApiKey || ''}
1410
+ onChange={(e) => setElevenLabsApiKey(e.target.value || null)}
1411
+ className="input w-full font-mono text-xs bg-background/50"
1412
+ placeholder={elevenLabsApiKey ? "••••••••••••••••" : "Paste your xi-api-key here"}
1413
+ />
1414
+ {elevenLabsApiKey && (
1415
+ <button
1416
+ onClick={() => setElevenLabsApiKey(null)}
1417
+ className="absolute right-2 top-1.5 text-[10px] text-destructive hover:underline"
1418
+ >
1419
+ Clear
1420
+ </button>
1421
+ )}
1422
+ </div>
1423
+ </div>
1424
+
1425
+ {/* Inline Configuration (Only if Key is set) */}
1426
+ {elevenLabsApiKey ? (
1427
+ <div className="space-y-3 animate-fade-in pt-2 border-t border-border/50">
1428
+ {/* TTS Model */}
1429
+ <div>
1430
+ <label className="block text-xs font-medium text-foreground mb-1.5">Voice Model</label>
1431
+ <select
1432
+ value={voiceModel}
1433
+ onChange={(e) => {
1434
+ setVoiceModel(e.target.value);
1435
+ localStorage.setItem('voice_model', e.target.value);
1436
+ }}
1437
+ className="input w-full text-xs bg-background/50"
1438
+ >
1439
+ {availableModels.length > 0 ? (
1440
+ availableModels.filter(m => m.model_id.includes('eleven')).map(model => (
1441
+ <option key={model.model_id} value={model.model_id}>
1442
+ {model.name}
1443
+ </option>
1444
+ ))
1445
+ ) : (
1446
+ <>
1447
+ <option value="eleven_multilingual_v2">Multilingual v2</option>
1448
+ <option value="eleven_flash_v2_5">Flash v2.5</option>
1449
+ <option value="eleven_turbo_v2_5">Turbo v2.5</option>
1450
+ </>
1451
+ )}
1452
+ </select>
1453
+ </div>
1454
+
1455
+ {/* Voice Selection */}
1456
+ <div>
1457
+ <label className="block text-xs font-medium text-foreground mb-1.5">Voice Character</label>
1458
+ <select
1459
+ value={voiceId}
1460
+ onChange={(e) => {
1461
+ setVoiceId(e.target.value);
1462
+ localStorage.setItem('voice_id', e.target.value);
1463
+ }}
1464
+ className="input w-full text-xs bg-background/50"
1465
+ >
1466
+ {availableVoices.length > 0 ? (
1467
+ availableVoices.map(voice => (
1468
+ <option key={voice.voice_id} value={voice.voice_id}>
1469
+ {voice.name} {voice.labels?.accent ? `(${voice.labels.accent})` : ''}
1470
+ </option>
1471
+ ))
1472
+ ) : (
1473
+ <>
1474
+ <option value="21m00Tcm4TlvDq8ikWAM">Rachel (English)</option>
1475
+ <option value="EXAVITQu4vr4xnSDxMaL">Bella (English)</option>
1476
+ </>
1477
+ )}
1478
+ </select>
1479
+ </div>
1480
+
1481
+ <div className="grid grid-cols-2 gap-2">
1482
+ {/* Output Language */}
1483
+ <div>
1484
+ <label className="block text-xs font-medium text-foreground mb-1.5">Output Lang</label>
1485
+ <select
1486
+ value={outputLanguage}
1487
+ onChange={(e) => {
1488
+ setOutputLanguage(e.target.value);
1489
+ localStorage.setItem('output_language', e.target.value);
1490
+ }}
1491
+ className="input w-full text-xs bg-background/50"
1492
+ >
1493
+ {Object.entries(LANG_PRESETS).map(([code, preset]) => (
1494
+ <option key={code} value={code}>{preset.name}</option>
1495
+ ))}
1496
+ </select>
1497
+ </div>
776
1498
 
777
- {/* Security Notice */}
778
- <div className="mt-4 p-3 bg-amber-500/5 rounded-lg border border-amber-500/10">
779
- <div className="flex items-start gap-2">
780
- <Info className="w-4 h-4 text-amber-500 mt-0.5 flex-shrink-0" />
781
- <div className="text-xs text-muted-foreground">
782
- <strong className="text-foreground">Security Note:</strong> Your API keys are stored locally in your browser and never sent to our servers.
783
- Keep them confidential and avoid sharing them publicly.
1499
+ {/* Input Language */}
1500
+ <div>
1501
+ <label className="block text-xs font-medium text-foreground mb-1.5">Input Lang</label>
1502
+ <select
1503
+ value={inputLanguage}
1504
+ onChange={(e) => {
1505
+ setInputLanguage(e.target.value);
1506
+ localStorage.setItem('input_language', e.target.value);
1507
+ }}
1508
+ className="input w-full text-xs bg-background/50"
1509
+ >
1510
+ <option value="en-US">English (US)</option>
1511
+ <option value="en-GB">English (UK)</option>
1512
+ <option value="hi-IN">Hindi</option>
1513
+ <option value="es-ES">Spanish</option>
1514
+ <option value="fr-FR">French</option>
1515
+ <option value="de-DE">German</option>
1516
+ <option value="ja-JP">Japanese</option>
1517
+ </select>
1518
+ </div>
1519
+ </div>
1520
+ </div>
1521
+ ) : (
1522
+ <div className="p-3 bg-muted/30 rounded-lg border border-dashed border-border text-center">
1523
+ <p className="text-xs text-muted-foreground">Add API key to unlock premium voice capabilities.</p>
1524
+ </div>
1525
+ )}
1526
+ </div>
1527
+ </section>
1528
+
1529
+ <div className="pt-4">
1530
+ <p className="text-[10px] text-muted-foreground/50 text-center">
1531
+ NitroStudio v1.0.0 • Local Environment
1532
+ </p>
784
1533
  </div>
1534
+
785
1535
  </div>
786
1536
  </div>
787
1537
  </div>
788
1538
  </div>
789
1539
  )}
790
1540
 
791
- {/* ChatGPT-style Messages Container - ONLY this scrolls */}
792
- <div className="flex-1 overflow-y-auto overflow-x-hidden">
793
- <div className="max-w-5xl mx-auto px-4 py-6 space-y-6 min-h-full">
1541
+ {/* Main Content Area with optional Ops split view */}
1542
+ <div className="flex-1 flex overflow-hidden min-h-0">
1543
+ {/* Chat Messages Container */}
1544
+ <div className={`flex flex-col overflow-hidden transition-all duration-300 ${isOpsViewOpen ? 'w-3/5' : 'w-full'}`}>
1545
+ <div className="flex-1 overflow-y-auto overflow-x-hidden">
1546
+ <div className={`mx-auto px-4 py-6 space-y-6 ${isOpsViewOpen ? 'max-w-3xl' : 'max-w-5xl'}`}>
794
1547
  {chatMessages.length === 0 && !loading ? (
795
1548
  /* Welcome Screen */
796
1549
  <div className="flex flex-col items-center justify-center min-h-[calc(100vh-300px)] animate-fade-in">
797
- <div className="w-16 h-16 rounded-2xl bg-gradient-to-br from-primary to-amber-500 flex items-center justify-center shadow-xl mb-6">
798
- <Bot className="w-10 h-10 text-white" strokeWidth={2.5} />
799
- </div>
800
1550
 
801
- <h2 className="text-3xl font-bold text-foreground mb-3">Welcome to NitroStudio</h2>
802
- <p className="text-muted-foreground text-center max-w-md mb-8">
803
- Your AI-powered development environment for Model Context Protocol (MCP) servers.
804
- Start a conversation or try a prompt below.
805
- </p>
1551
+ {/* Voice Mode UI - Only when ElevenLabs key is set */}
1552
+ {elevenLabsApiKey ? (
1553
+ <div className="flex flex-col items-center">
1554
+ {/* Custom Voice Orb - Same as VoiceOrbOverlay */}
1555
+ <button
1556
+ onClick={() => {
1557
+ // Apply language preset
1558
+ const preset = LANG_PRESETS[outputLanguage] || LANG_PRESETS['en'];
1559
+ setVoiceModel(preset.model);
1560
+ setVoiceId(preset.voice);
1561
+ setInputLanguage(preset.input);
1562
+ // Start voice mode
1563
+ setVoiceOverlayOpen(true);
1564
+ setVoiceModeEnabled(true);
1565
+ }}
1566
+ className="group relative w-44 h-44 rounded-full mb-6 cursor-pointer transition-transform duration-500 hover:scale-105"
1567
+ >
1568
+ {/* Rotating gradient ring - EXACT from VoiceOrbOverlay idle state */}
1569
+ <div
1570
+ className="absolute inset-0 rounded-full"
1571
+ style={{
1572
+ background: 'conic-gradient(from 0deg, #475569, #64748b, #475569)',
1573
+ padding: '3px',
1574
+ borderRadius: '50%'
1575
+ }}
1576
+ >
1577
+ {/* Inner orb */}
1578
+ <div
1579
+ className="w-full h-full rounded-full bg-[#0a0a0a] flex items-center justify-center"
1580
+ style={{
1581
+ boxShadow: '0 0 30px 5px rgba(71, 85, 105, 0.15)'
1582
+ }}
1583
+ >
1584
+ {/* Center gradient - EXACT from VoiceOrbOverlay idle state */}
1585
+ <div
1586
+ className="w-32 h-32 rounded-full flex items-center justify-center"
1587
+ style={{
1588
+ background: 'radial-gradient(circle, #64748b 0%, #0a0a0a 60%)'
1589
+ }}
1590
+ >
1591
+ {/* Small Mic Icon */}
1592
+ <svg className="w-10 h-10 text-slate-400/70 group-hover:text-slate-300 transition-colors" fill="none" stroke="currentColor" viewBox="0 0 24 24">
1593
+ <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={1.5} d="M12 1a3 3 0 00-3 3v8a3 3 0 006 0V4a3 3 0 00-3-3z" />
1594
+ <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={1.5} d="M19 10v2a7 7 0 01-14 0v-2M12 19v4M8 23h8" />
1595
+ </svg>
1596
+ </div>
1597
+ </div>
1598
+ </div>
1599
+ </button>
1600
+
1601
+ {/* Language Dropdown */}
1602
+ <select
1603
+ value={outputLanguage}
1604
+ onChange={(e) => {
1605
+ const lang = e.target.value;
1606
+ const preset = LANG_PRESETS[lang];
1607
+ if (preset) {
1608
+ setOutputLanguage(lang);
1609
+ setInputLanguage(preset.input);
1610
+ setVoiceModel(preset.model);
1611
+ setVoiceId(preset.voice);
1612
+ // Save to localStorage
1613
+ localStorage.setItem('output_language', lang);
1614
+ localStorage.setItem('input_language', preset.input);
1615
+ localStorage.setItem('voice_model', preset.model);
1616
+ localStorage.setItem('voice_id', preset.voice);
1617
+ }
1618
+ }}
1619
+ className="bg-muted/50 border border-border rounded-xl px-6 py-2.5 text-sm focus:outline-none focus:ring-2 focus:ring-primary/50 mb-4"
1620
+ >
1621
+ {Object.entries(LANG_PRESETS).map(([code, preset]) => (
1622
+ <option key={code} value={code}>{preset.name}</option>
1623
+ ))}
1624
+ </select>
1625
+
1626
+ <p className="text-sm text-muted-foreground/80 mb-8">Click to start voice conversation</p>
1627
+ </div>
1628
+ ) : (
1629
+ /* Traditional Welcome - Only when no ElevenLabs key */
1630
+ <>
1631
+ <div className="w-16 h-16 rounded bg-gradient-to-br from-primary to-secondary flex items-center justify-center shadow-xl mb-6">
1632
+ <SparklesIcon className="h-10 w-10 text-white" />
1633
+ </div>
1634
+
1635
+ <h2 className="text-3xl font-bold text-foreground mb-3">Welcome to NitroStudio</h2>
1636
+ <p className="text-muted-foreground text-center max-w-md mb-8">
1637
+ Your AI-powered development environment for Model Context Protocol (MCP) servers.
1638
+ Start a conversation or try a prompt below.
1639
+ </p>
1640
+ </>
1641
+ )}
806
1642
 
807
1643
  {/* Prompts Overview */}
808
1644
  {prompts.length > 0 && (
809
1645
  <div className="w-full max-w-2xl">
810
1646
  <div className="flex items-center gap-2 mb-4">
811
- <Sparkles className="w-5 h-5 text-primary" />
1647
+ <SparklesIcon className="h-5 w-5 text-primary" />
812
1648
  <h3 className="text-lg font-semibold text-foreground">Available Prompts</h3>
813
1649
  <span className="text-sm text-muted-foreground">({prompts.length})</span>
814
1650
  </div>
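Note: LANG_PRESETS is read at several points in this file (preset.name, preset.model, preset.voice, preset.input, and preset.greeting) but is defined elsewhere. Its shape, inferred from those reads, is roughly the following; the concrete values are placeholders taken from this file's fallback options, not the package's actual presets.

// Inferred shape only; field names come from how LANG_PRESETS is read in this file.
interface LangPreset {
  name: string;      // display name shown in the language <select>
  model: string;     // ElevenLabs TTS model id
  voice: string;     // ElevenLabs voice id
  input: string;     // speech-recognition locale, e.g. 'en-US'
  greeting: string;  // localized greeting spoken when voice mode opens
}

const EXAMPLE_PRESETS: Record<string, LangPreset> = {
  en: {
    name: 'English',
    model: 'eleven_multilingual_v2',           // fallback model option listed in this file
    voice: '21m00Tcm4TlvDq8ikWAM',             // "Rachel", the fallback voice listed in this file
    input: 'en-US',
    greeting: 'Hi! How can I help you today?', // placeholder greeting, not the package's text
  },
};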
@@ -824,8 +1660,8 @@ export default function ChatPage() {
824
1660
  className="card card-hover p-4 text-left group transition-all hover:scale-[1.02]"
825
1661
  >
826
1662
  <div className="flex items-start gap-3">
827
- <div className="w-8 h-8 rounded-lg bg-primary/10 flex items-center justify-center group-hover:bg-primary/20 transition-colors flex-shrink-0">
828
- <FileText className="w-4 h-4 text-primary" />
1663
+ <div className="h-8 w-8 rounded-lg bg-primary/10 flex items-center justify-center group-hover:bg-primary/20 transition-colors flex-shrink-0">
1664
+ <DocumentTextIcon className="h-4 w-4 text-primary" />
829
1665
  </div>
830
1666
  <div className="flex-1 min-w-0">
831
1667
  <h4 className="font-semibold text-foreground text-sm mb-1 truncate">
@@ -884,10 +1720,10 @@ export default function ChatPage() {
884
1720
  ))}
885
1721
  {loading && (
886
1722
  <div className="flex gap-4 items-start animate-fade-in">
887
- <div className="w-8 h-8 rounded-full bg-gradient-to-br from-primary to-amber-500 flex items-center justify-center flex-shrink-0 shadow-md">
888
- <Bot className="w-5 h-5 text-white" strokeWidth={2.5} />
1723
+ <div className="h-8 w-8 rounded-full bg-gradient-to-br from-primary to-secondary flex items-center justify-center flex-shrink-0 shadow-md">
1724
+ <SparklesIcon className="h-5 w-5 text-white" />
889
1725
  </div>
890
- <div className="flex-1 bg-card/50 backdrop-blur-sm rounded-2xl px-5 py-4 border border-border/50">
1726
+ <div className="flex-1 bg-card/50 backdrop-blur-sm rounded px-5 py-4 border border-border/50">
891
1727
  <div className="flex items-center gap-2">
892
1728
  <div className="flex gap-1">
893
1729
  <span className="w-2 h-2 bg-primary rounded-full animate-bounce" style={{ animationDelay: '0s' }}></span>
@@ -902,86 +1738,285 @@ export default function ChatPage() {
902
1738
  </>
903
1739
  )}
904
1740
  <div ref={messagesEndRef} />
1741
+ </div>
1742
+ </div>
905
1743
  </div>
1744
+
1745
+ {/* Ops Canvas Panel */}
1746
+ {isOpsViewOpen && (
1747
+ <div className="w-2/5 min-w-[320px] border-l border-zinc-800 flex flex-col overflow-hidden">
1748
+ <OpsCanvas className="flex-1" />
1749
+ </div>
1750
+ )}
906
1751
  </div>
907
1752
 
908
- {/* ChatGPT-style Input Area - Fixed at bottom */}
909
- <div className="sticky bottom-0 border-t border-border/50 bg-background/95 backdrop-blur-md shadow-[0_-2px_10px_rgba(0,0,0,0.1)]">
910
- <div className="max-w-5xl mx-auto px-3 sm:px-4 py-3 sm:py-4">
1753
+ {/* Sleek Professional Input Area */}
1754
+ <div className="sticky bottom-0 bg-gradient-to-t from-background via-background to-transparent pt-6 pb-4">
1755
+ <div className="max-w-3xl mx-auto px-4">
1756
+ {/* Current file preview */}
911
1757
  {currentFile && (
912
- <div className="mb-3 p-3 bg-card rounded-xl flex items-start gap-3 border border-border/50 animate-fade-in">
913
- {currentFile.type.startsWith('image/') ? (
914
- <img
915
- src={currentFile.data}
916
- alt={currentFile.name}
917
- className="w-20 h-20 object-cover rounded-lg border border-border"
918
- />
919
- ) : (
920
- <div className="w-20 h-20 rounded-lg border border-border bg-muted flex items-center justify-center">
921
- <FileText className="w-8 h-8 text-muted-foreground" />
922
- </div>
923
- )}
924
- <div className="flex-1 min-w-0">
925
- <p className="text-sm font-medium text-foreground truncate">{currentFile.name}</p>
926
- <p className="text-xs text-muted-foreground">{currentFile.type}</p>
927
- </div>
928
- <button
929
- onClick={() => setCurrentFile(null)}
930
- className="w-7 h-7 rounded-lg flex items-center justify-center bg-muted/50 hover:bg-muted text-muted-foreground hover:text-foreground transition-all flex-shrink-0"
931
- >
932
- <X className="w-4 h-4" />
1758
+ <div className="mb-2 flex items-center gap-2 text-xs text-muted-foreground bg-muted/50 rounded-lg px-3 py-2">
1759
+ <PhotoIcon className="w-4 h-4" />
1760
+ <span className="truncate">{currentFile.name}</span>
1761
+ <button onClick={() => setCurrentFile(null)} className="ml-auto hover:text-foreground">
1762
+ <XMarkIcon className="w-4 h-4" />
933
1763
  </button>
934
1764
  </div>
935
1765
  )}
936
- <div className="flex items-center gap-2">
937
- <input
938
- type="file"
939
- ref={fileInputRef}
940
- onChange={handleFileUpload}
941
- accept="image/*,.pdf,.txt,.md,.json,.csv,.docx"
942
- className="hidden"
943
- />
944
- <button
945
- onClick={() => fileInputRef.current?.click()}
946
- className="h-11 w-11 rounded-xl flex items-center justify-center bg-muted/50 hover:bg-muted text-muted-foreground hover:text-foreground transition-all flex-shrink-0"
947
- title="Upload file"
948
- >
949
- <ImageIcon className="w-5 h-5" />
950
- </button>
951
- <div className="flex-1 relative flex items-center">
1766
+
1767
+ {/* Main Input Container */}
1768
+ <div className={`
1769
+ relative rounded-2xl border transition-all duration-300
1770
+ ${loading ? 'opacity-60' : ''}
1771
+ ${llmState !== 'idle' ? 'border-primary/50 shadow-lg shadow-primary/5' : 'border-border/60 hover:border-border focus-within:border-primary/30'}
1772
+ bg-card/80 backdrop-blur-sm
1773
+ `}>
1774
+ <div className="flex items-end gap-1 p-2">
1775
+ {/* File attachment dropdown */}
1776
+ <div className="relative group">
1777
+ <input
1778
+ type="file"
1779
+ ref={fileInputRef}
1780
+ onChange={handleFileUpload}
1781
+ accept="image/*,.pdf,.txt,.md,.json,.csv,.docx"
1782
+ className="hidden"
1783
+ />
1784
+ <button
1785
+ onClick={() => fileInputRef.current?.click()}
1786
+ className="p-2 rounded-xl text-muted-foreground hover:text-foreground hover:bg-muted/80 transition-all"
1787
+ title="Attach file"
1788
+ >
1789
+ <svg className="w-5 h-5" fill="none" stroke="currentColor" viewBox="0 0 24 24">
1790
+ <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={1.5} d="M12 4v16m8-8H4" />
1791
+ </svg>
1792
+ </button>
1793
+ </div>
1794
+
1795
+ {/* Text Input */}
952
1796
  <textarea
953
1797
  ref={textareaRef}
954
1798
  value={inputValue}
955
1799
  onChange={(e) => setInputValue(e.target.value)}
956
1800
  onKeyDown={(e) => {
957
- // Send on Enter, new line on Shift+Enter
958
1801
  if (e.key === 'Enter' && !e.shiftKey) {
959
1802
  e.preventDefault();
960
1803
  handleSend();
961
1804
  }
962
1805
  }}
963
- placeholder="Message NitroStudio... (Shift + Enter for new line)"
964
- className="w-full px-4 py-3 rounded-xl bg-card border border-border/50 focus:border-primary/50 focus:ring-2 focus:ring-primary/20 resize-none text-sm text-foreground placeholder:text-muted-foreground transition-all outline-none"
1806
+ placeholder="Message..."
1807
+ className="flex-1 bg-transparent border-0 focus:ring-0 resize-none py-2 px-1 text-sm min-h-[40px] max-h-[120px] placeholder:text-muted-foreground/50"
965
1808
  rows={1}
966
- style={{
967
- minHeight: '44px',
968
- maxHeight: '200px',
969
- overflow: 'hidden',
970
- }}
1809
+ disabled={loading}
971
1810
  />
1811
+
1812
+ {/* Right side buttons */}
1813
+ <div className="flex items-center gap-1">
1814
+ {/* Voice mode button */}
1815
+ {elevenLabsApiKey && (
1816
+ <button
1817
+ onClick={() => {
1818
+ setVoiceModeEnabled(true);
1819
+ setVoiceOverlayOpen(true);
1820
+ }}
1821
+ className={`p-2 rounded-xl transition-all ${voiceModeEnabled
1822
+ ? 'text-primary bg-primary/10'
1823
+ : 'text-muted-foreground hover:text-foreground hover:bg-muted/80'
1824
+ }`}
1825
+ title="Voice mode"
1826
+ >
1827
+ <MicrophoneIcon className="w-5 h-5" />
1828
+ </button>
1829
+ )}
1830
+
1831
+ {/* Send button */}
1832
+ <button
1833
+ onClick={() => handleSend()}
1834
+ disabled={loading || (!inputValue.trim() && !currentFile)}
1835
+ className={`p-2 rounded-xl transition-all ${inputValue.trim() || currentFile
1836
+ ? 'bg-primary text-primary-foreground hover:bg-primary/90'
1837
+ : 'text-muted-foreground/50 cursor-not-allowed'
1838
+ }`}
1839
+ title="Send"
1840
+ >
1841
+ <svg className="w-5 h-5" fill="none" stroke="currentColor" viewBox="0 0 24 24">
1842
+ <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M5 12h14M12 5l7 7-7 7" />
1843
+ </svg>
1844
+ </button>
1845
+ </div>
972
1846
  </div>
1847
+ </div>
1848
+
1849
+ {/* Minimal footer hint */}
1850
+ <p className="text-[10px] text-muted-foreground/40 text-center mt-2">
1851
+ Press Enter to send, Shift+Enter for new line
1852
+ </p>
1853
+ </div>
1854
+ </div>
1855
+
1856
+ {/* Voice Settings Modal - z-60 to be above voice overlay (z-50) */}
1857
+ {showVoiceSettings && (
1858
+ <div
1859
+ className="fixed inset-0 z-[60] flex items-center justify-center bg-black/80 backdrop-blur-sm"
1860
+ onClick={() => setShowVoiceSettings(false)}
1861
+ >
1862
+ <div
1863
+ className="bg-card border border-border rounded-2xl p-6 w-[450px] max-h-[85vh] overflow-auto shadow-2xl"
1864
+ onClick={(e) => e.stopPropagation()}
1865
+ >
1866
+ <div className="flex items-center justify-between mb-6">
1867
+ <h2 className="text-lg font-semibold">Voice Settings</h2>
1868
+ <button
1869
+ onClick={() => setShowVoiceSettings(false)}
1870
+ className="p-1 rounded-lg hover:bg-muted transition-colors"
1871
+ >
1872
+ <XMarkIcon className="w-5 h-5" />
1873
+ </button>
1874
+ </div>
1875
+
1876
+ {loadingVoiceData ? (
1877
+ <div className="flex items-center justify-center py-8">
1878
+ <div className="w-6 h-6 border-2 border-primary border-t-transparent rounded-full animate-spin" />
1879
+ <span className="ml-3 text-muted-foreground">Loading voice options...</span>
1880
+ </div>
1881
+ ) : (
1882
+ <div className="space-y-5">
1883
+ {/* Model Selection */}
1884
+ <div>
1885
+ <label className="block text-sm font-medium text-muted-foreground mb-2">
1886
+ TTS Model
1887
+ </label>
1888
+ <select
1889
+ value={voiceModel}
1890
+ onChange={(e) => setVoiceModel(e.target.value)}
1891
+ className="w-full bg-muted/50 border border-border rounded-xl px-3 py-2.5 text-sm focus:outline-none focus:ring-2 focus:ring-primary/50"
1892
+ >
1893
+ {availableModels.length > 0 ? (
1894
+ availableModels.filter(m => m.model_id.includes('eleven')).map(model => (
1895
+ <option key={model.model_id} value={model.model_id}>
1896
+ {model.name}
1897
+ </option>
1898
+ ))
1899
+ ) : (
1900
+ <>
1901
+ <option value="eleven_multilingual_v2">Multilingual v2</option>
1902
+ <option value="eleven_flash_v2_5">Flash v2.5</option>
1903
+ <option value="eleven_turbo_v2_5">Turbo v2.5</option>
1904
+ </>
1905
+ )}
1906
+ </select>
1907
+ </div>
1908
+
1909
+ {/* Output Language */}
1910
+ <div>
1911
+ <label className="block text-sm font-medium text-muted-foreground mb-2">
1912
+ Output Language (TTS)
1913
+ </label>
1914
+ <select
1915
+ value={outputLanguage}
1916
+ onChange={(e) => setOutputLanguage(e.target.value)}
1917
+ className="w-full bg-muted/50 border border-border rounded-xl px-3 py-2.5 text-sm focus:outline-none focus:ring-2 focus:ring-primary/50"
1918
+ >
1919
+ {/* Get languages from selected model if available */}
1920
+ {(() => {
1921
+ const selectedModel = availableModels.find(m => m.model_id === voiceModel);
1922
+ if (selectedModel?.languages && selectedModel.languages.length > 0) {
1923
+ return selectedModel.languages.map(lang => (
1924
+ <option key={lang.language_id} value={lang.language_id}>
1925
+ {lang.name}
1926
+ </option>
1927
+ ));
1928
+ }
1929
+ return (
1930
+ <>
1931
+ <option value="en">English</option>
1932
+ <option value="hi">Hindi</option>
1933
+ <option value="es">Spanish</option>
1934
+ <option value="fr">French</option>
1935
+ <option value="de">German</option>
1936
+ <option value="ja">Japanese</option>
1937
+ <option value="ko">Korean</option>
1938
+ <option value="zh">Chinese</option>
1939
+ <option value="pt">Portuguese</option>
1940
+ <option value="it">Italian</option>
1941
+ </>
1942
+ );
1943
+ })()}
1944
+ </select>
1945
+ </div>
1946
+
1947
+ {/* Voice Character - pre-filtered by language from API */}
1948
+ <div>
1949
+ <label className="block text-sm font-medium text-muted-foreground mb-2">
1950
+ Voice Character
1951
+ </label>
1952
+ <select
1953
+ value={voiceId}
1954
+ onChange={(e) => setVoiceId(e.target.value)}
1955
+ className="w-full bg-muted/50 border border-border rounded-xl px-3 py-2.5 text-sm focus:outline-none focus:ring-2 focus:ring-primary/50"
1956
+ >
1957
+ {availableVoices.length > 0 ? (
1958
+ availableVoices.map(voice => (
1959
+ <option key={voice.voice_id} value={voice.voice_id}>
1960
+ {voice.name} {voice.labels?.accent ? `(${voice.labels.accent})` : voice.category === 'shared' ? '(Shared)' : ''}
1961
+ </option>
1962
+ ))
1963
+ ) : (
1964
+ <>
1965
+ <option value="21m00Tcm4TlvDq8ikWAM">Rachel (English)</option>
1966
+ <option value="EXAVITQu4vr4xnSDxMaL">Bella (English)</option>
1967
+ </>
1968
+ )}
1969
+ </select>
1970
+ <p className="text-xs text-muted-foreground/60 mt-1">
1971
+ {loadingVoiceData ? 'Loading voices...' : `${availableVoices.length} voices for ${outputLanguage.toUpperCase()}`}
1972
+ </p>
1973
+ </div>
1974
+
1975
+ {/* Input Language (Speech Recognition) */}
1976
+ <div>
1977
+ <label className="block text-sm font-medium text-muted-foreground mb-2">
1978
+ Input Language (Speech Recognition)
1979
+ </label>
1980
+ <select
1981
+ value={inputLanguage}
1982
+ onChange={(e) => setInputLanguage(e.target.value)}
1983
+ className="w-full bg-muted/50 border border-border rounded-xl px-3 py-2.5 text-sm focus:outline-none focus:ring-2 focus:ring-primary/50"
1984
+ >
1985
+ <option value="en-US">English (US)</option>
1986
+ <option value="en-GB">English (UK)</option>
1987
+ <option value="hi-IN">Hindi</option>
1988
+ <option value="es-ES">Spanish</option>
1989
+ <option value="fr-FR">French</option>
1990
+ <option value="de-DE">German</option>
1991
+ <option value="ja-JP">Japanese</option>
1992
+ <option value="ko-KR">Korean</option>
1993
+ <option value="zh-CN">Chinese (Mandarin)</option>
1994
+ <option value="pt-BR">Portuguese (Brazil)</option>
1995
+ <option value="it-IT">Italian</option>
1996
+ </select>
1997
+ <p className="text-xs text-muted-foreground/60 mt-1">
1998
+ Language for voice input (what you speak)
1999
+ </p>
2000
+ </div>
2001
+ </div>
2002
+ )}
2003
+
973
2004
  <button
974
- onClick={handleSend}
975
- disabled={loading || (!inputValue.trim() && !currentFile)}
976
- className="h-11 w-11 rounded-xl flex items-center justify-center bg-gradient-to-br from-primary to-amber-500 text-white shadow-lg hover:shadow-xl disabled:opacity-50 disabled:cursor-not-allowed transition-all flex-shrink-0 hover:scale-105 active:scale-95"
977
- title="Send message (Enter)"
2005
+ onClick={() => {
2006
+ // Save to localStorage
2007
+ localStorage.setItem('voice_model', voiceModel);
2008
+ localStorage.setItem('output_language', outputLanguage);
2009
+ localStorage.setItem('input_language', inputLanguage);
2010
+ localStorage.setItem('voice_id', voiceId);
2011
+ setShowVoiceSettings(false);
2012
+ }}
2013
+ className="w-full mt-6 bg-primary text-primary-foreground rounded-xl py-2.5 text-sm font-medium hover:bg-primary/90 transition-colors"
978
2014
  >
979
- <Send className="w-5 h-5" strokeWidth={2.5} />
2015
+ Save Settings
980
2016
  </button>
981
2017
  </div>
982
-
983
2018
  </div>
984
- </div>
2019
+ )}
985
2020
 
986
2021
  {/* Prompt Executor Modal */}
987
2022
  {selectedPrompt && (
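Note: the Save Settings handler above writes the four voice preferences to localStorage under individual keys. The matching read-on-mount side is not shown in this section; a hedged sketch, assuming the same keys and this file's fallback values as defaults:

// Assumed hydration counterpart to the Save Settings handler; keys match the ones written above.
interface VoiceSettings {
  voiceModel: string;
  voiceId: string;
  outputLanguage: string;
  inputLanguage: string;
}

function loadVoiceSettings(): VoiceSettings {
  return {
    voiceModel: localStorage.getItem('voice_model') ?? 'eleven_multilingual_v2',
    voiceId: localStorage.getItem('voice_id') ?? '21m00Tcm4TlvDq8ikWAM',
    outputLanguage: localStorage.getItem('output_language') ?? 'en',
    inputLanguage: localStorage.getItem('input_language') ?? 'en-US',
  };
}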
@@ -991,13 +2026,13 @@ export default function ChatPage() {
991
2026
  onClick={() => setSelectedPrompt(null)}
992
2027
  >
993
2028
  <div
994
- className="bg-card rounded-2xl p-6 w-[600px] max-h-[80vh] overflow-auto border border-border shadow-2xl animate-scale-in"
2029
+ className="bg-card rounded p-6 w-[600px] max-h-[80vh] overflow-auto border border-border shadow-2xl animate-scale-in"
995
2030
  onClick={(e) => e.stopPropagation()}
996
2031
  >
997
2032
  <div className="flex items-center justify-between mb-4">
998
2033
  <div className="flex items-center gap-3">
999
- <div className="w-10 h-10 rounded-lg bg-primary/10 flex items-center justify-center">
1000
- <FileText className="w-5 h-5 text-primary" />
2034
+ <div className="h-10 w-10 rounded-lg bg-primary/10 flex items-center justify-center">
2035
+ <DocumentTextIcon className="h-5 w-5 text-primary" />
1001
2036
  </div>
1002
2037
  <h2 className="text-xl font-bold text-foreground">{selectedPrompt.name}</h2>
1003
2038
  </div>
@@ -1005,7 +2040,7 @@ export default function ChatPage() {
1005
2040
  onClick={() => setSelectedPrompt(null)}
1006
2041
  className="btn btn-ghost w-10 h-10 p-0"
1007
2042
  >
1008
- <X className="w-5 h-5" />
2043
+ <XMarkIcon className="h-5 w-5" />
1009
2044
  </button>
1010
2045
  </div>
1011
2046
 
@@ -1046,7 +2081,7 @@ export default function ChatPage() {
1046
2081
  onClick={handleExecutePrompt}
1047
2082
  className="btn btn-primary w-full gap-2"
1048
2083
  >
1049
- <Play className="w-4 h-4" />
2084
+ <PlayIcon className="h-4 w-4" />
1050
2085
  Execute Prompt
1051
2086
  </button>
1052
2087
  </div>
@@ -1067,7 +2102,7 @@ export default function ChatPage() {
1067
2102
  className="absolute top-4 right-4 z-60 p-3 rounded-lg bg-white/10 hover:bg-white/20 backdrop-blur-sm border border-white/20 transition-all"
1068
2103
  title="Exit fullscreen"
1069
2104
  >
1070
- <X className="w-6 h-6 text-white" />
2105
+ <XMarkIcon className="w-6 h-6 text-white" />
1071
2106
  </button>
1072
2107
 
1073
2108
  {/* Widget Container */}
@@ -1078,44 +2113,95 @@ export default function ChatPage() {
1078
2113
  </div>
1079
2114
  </div>
1080
2115
  )}
2116
+
2117
+ {/* Voice Mode Overlay */}
2118
+ <VoiceOrbOverlay
2119
+ isOpen={voiceOverlayOpen}
2120
+ onClose={() => {
2121
+ setVoiceOverlayOpen(false);
2122
+ setVoiceModeEnabled(false);
2123
+ setSpokenText('');
2124
+ // Stop any playing audio
2125
+ if (audioRef.current) {
2126
+ audioRef.current.pause();
2127
+ audioRef.current = null;
2128
+ }
2129
+ setLlmState('idle');
2130
+ // Reset greeting flag so greeting plays on next open
2131
+ hasSpokenGreeting.current = false;
2132
+ }}
2133
+ onSendMessage={(text) => {
2134
+ console.log('📤 onSendMessage called with:', text);
2135
+ setLlmState('thinking');
2136
+ handleSend(text);
2137
+ }}
2138
+ onGreet={() => {
2139
+ // Only greet once per session to prevent overlap
2140
+ if (hasSpokenGreeting.current) {
2141
+ setLlmState('listening');
2142
+ return;
2143
+ }
2144
+ hasSpokenGreeting.current = true;
2145
+ // Use localized greeting based on output language
2146
+ const preset = LANG_PRESETS[outputLanguage] || LANG_PRESETS['en'];
2147
+ const greeting = preset.greeting;
2148
+ console.log('👋 onGreet called - playing welcome message in', preset.name);
2149
+ setSpokenText(greeting);
2150
+ setVoiceModeEnabled(true);
2151
+ playTextToSpeech(greeting);
2152
+ }}
2153
+ elevenLabsApiKey={elevenLabsApiKey || ''}
2154
+ llmState={llmState}
2155
+ spokenText={spokenText}
2156
+ displayMode={voiceDisplayMode}
2157
+ onDisplayModeChange={(mode) => {
2158
+ setVoiceDisplayMode(mode);
2159
+ if (mode === 'voice-chat') {
2160
+ setVoiceOverlayOpen(false);
2161
+ }
2162
+ }}
2163
+ onSettingsClick={() => setShowVoiceSettings(true)}
2164
+ inputLanguage={inputLanguage}
2165
+ voiceModeActive={voiceModeEnabled}
2166
+ onInterrupt={() => {
2167
+ // Talk-to-interrupt: stop TTS and switch to listening
2168
+ if (audioRef.current) {
2169
+ audioRef.current.pause();
2170
+ audioRef.current = null;
2171
+ }
2172
+ setSpokenText('');
2173
+ setLlmState('listening');
2174
+ }}
2175
+ />
1081
2176
  </div>
1082
2177
  );
1083
2178
  }
1084
2179
 
1085
2180
  function ChatMessageComponent({ message, tools }: { message: ChatMessage; tools: Tool[] }) {
1086
- if (message.role === 'tool') return null; // Don't render tool messages directly
1087
-
2181
+ if (message.role === 'tool') return null;
1088
2182
  const isUser = message.role === 'user';
1089
2183
 
1090
2184
  return (
1091
2185
  <div className="flex gap-4 items-start animate-fade-in group">
1092
- {/* Avatar */}
1093
2186
  {!isUser && (
1094
- <div className="w-8 h-8 rounded-full bg-gradient-to-br from-primary to-amber-500 flex items-center justify-center flex-shrink-0 shadow-md group-hover:shadow-lg transition-shadow">
1095
- <Bot className="w-5 h-5 text-white" strokeWidth={2.5} />
2187
+ <div className="h-8 w-8 rounded-full bg-gradient-to-br from-primary to-secondary flex items-center justify-center flex-shrink-0 shadow-md group-hover:shadow-lg transition-shadow">
2188
+ <SparklesIcon className="h-5 w-5 text-white" />
1096
2189
  </div>
1097
2190
  )}
1098
2191
  {isUser && (
1099
- <div className="w-8 h-8 rounded-full bg-gradient-to-br from-slate-600 to-slate-700 flex items-center justify-center flex-shrink-0 shadow-md group-hover:shadow-lg transition-shadow">
2192
+ <div className="h-8 w-8 rounded-full bg-gradient-to-br from-slate-600 to-slate-700 flex items-center justify-center flex-shrink-0 shadow-md group-hover:shadow-lg transition-shadow">
1100
2193
  <span className="text-white text-sm font-bold">You</span>
1101
2194
  </div>
1102
2195
  )}
1103
-
1104
- {/* Message Content */}
1105
2196
  <div className="flex-1 min-w-0">
1106
- {/* File if present */}
1107
2197
  {message.file && (
1108
2198
  <div className="mb-3 rounded-xl overflow-hidden border border-border/50 shadow-sm max-w-sm">
1109
2199
  {message.file.type.startsWith('image/') ? (
1110
- <img
1111
- src={message.file.data}
1112
- alt={message.file.name}
1113
- className="max-w-full"
1114
- />
2200
+ <img src={message.file.data} alt={message.file.name} className="max-w-full" />
1115
2201
  ) : (
1116
2202
  <div className="p-4 bg-muted/30 flex items-center gap-3">
1117
- <div className="w-10 h-10 rounded-lg bg-primary/10 flex items-center justify-center">
1118
- <FileText className="w-5 h-5 text-primary" />
2203
+ <div className="h-10 w-10 rounded-lg bg-primary/10 flex items-center justify-center">
2204
+ <DocumentTextIcon className="h-5 w-5 text-primary" />
1119
2205
  </div>
1120
2206
  <div className="flex-1 min-w-0">
1121
2207
  <p className="text-sm font-medium text-foreground truncate">{message.file.name}</p>
@@ -1125,8 +2211,6 @@ function ChatMessageComponent({ message, tools }: { message: ChatMessage; tools:
1125
2211
  )}
1126
2212
  </div>
1127
2213
  )}
1128
-
1129
- {/* Text content with markdown rendering */}
1130
2214
  {message.content && (
1131
2215
  <div className="text-sm leading-relaxed mb-4">
1132
2216
  {isUser ? (
@@ -1136,12 +2220,10 @@ function ChatMessageComponent({ message, tools }: { message: ChatMessage; tools:
1136
2220
  )}
1137
2221
  </div>
1138
2222
  )}
1139
-
1140
- {/* Tool Calls - ChatGPT-style cards */}
1141
2223
  {message.toolCalls && message.toolCalls.length > 0 && (
1142
2224
  <div className="space-y-3">
1143
- {message.toolCalls.map((toolCall) => (
1144
- <ToolCallComponent key={toolCall.id} toolCall={toolCall} tools={tools} />
2225
+ {message.toolCalls.map((tc: ToolCall) => (
2226
+ <ToolCallComponent key={tc.id} toolCall={tc} tools={tools} />
1145
2227
  ))}
1146
2228
  </div>
1147
2229
  )}
@@ -1154,55 +2236,41 @@ function ToolCallComponent({ toolCall, tools }: { toolCall: ToolCall; tools: Too
1154
2236
  const [showArgs, setShowArgs] = useState(false);
1155
2237
  const tool = tools.find((t) => t.name === toolCall.name);
1156
2238
 
1157
- // Get widget URI from multiple possible sources
1158
2239
  const componentUri =
1159
2240
  tool?.widget?.route ||
1160
2241
  tool?.outputTemplate ||
1161
2242
  tool?._meta?.['openai/outputTemplate'] ||
1162
2243
  tool?._meta?.['ui/template'];
1163
2244
 
1164
- // Get result data from toolCall and unwrap if needed
1165
2245
  let widgetData = toolCall.result || toolCall.arguments;
1166
2246
 
1167
- // Unwrap if response was wrapped by TransformInterceptor
1168
- // Check if it has the interceptor's structure: { success, data, metadata }
1169
2247
  if (widgetData && typeof widgetData === 'object' &&
1170
2248
  widgetData.success !== undefined && widgetData.data !== undefined) {
1171
- widgetData = widgetData.data; // Return the unwrapped data
2249
+ widgetData = widgetData.data;
1172
2250
  }
1173
2251
 
1174
- console.log('ToolCallComponent:', {
1175
- toolName: toolCall.name,
1176
- componentUri,
1177
- hasData: !!widgetData,
1178
- tool
1179
- });
1180
-
1181
2252
  return (
1182
2253
  <div className="relative group/widget">
1183
- {/* Widget - No frame, just the widget */}
1184
2254
  {componentUri && widgetData && (
1185
- <div className="rounded-lg overflow-hidden max-w-5xl">
1186
- <WidgetRenderer uri={componentUri} data={widgetData} className="widget-in-chat" />
1187
- </div>
2255
+ <WidgetErrorBoundary>
2256
+ <div className="rounded-lg overflow-hidden max-w-5xl" style={{ minHeight: '100px' }}>
2257
+ <WidgetRenderer uri={componentUri} data={widgetData} className="widget-in-chat" />
2258
+ </div>
2259
+ </WidgetErrorBoundary>
1188
2260
  )}
1189
-
1190
- {/* 3-dots menu button - positioned absolutely in top-right */}
1191
2261
  <button
1192
2262
  onClick={() => setShowArgs(!showArgs)}
1193
2263
  className="absolute top-2 right-2 w-8 h-8 rounded-lg flex items-center justify-center bg-background/80 backdrop-blur-sm border border-border/50 hover:bg-background hover:border-border transition-all opacity-0 group-hover/widget:opacity-100 shadow-sm z-10"
1194
2264
  title="View tool details"
1195
2265
  >
1196
- <MoreVertical className="w-4 h-4 text-muted-foreground" />
2266
+ <EllipsisVerticalIcon className="h-4 w-4 text-muted-foreground" />
1197
2267
  </button>
1198
-
1199
- {/* Arguments Modal/Dropdown - appears when 3-dots clicked */}
1200
2268
  {showArgs && (
1201
2269
  <div className="absolute top-12 right-2 w-96 max-w-[calc(100%-1rem)] bg-card rounded-xl border border-border shadow-2xl p-4 animate-fade-in z-20">
1202
2270
  <div className="flex items-center justify-between mb-3">
1203
2271
  <div className="flex items-center gap-2">
1204
2272
  <div className="w-6 h-6 rounded-md bg-primary/10 flex items-center justify-center">
1205
- <Wrench className="w-3.5 h-3.5 text-primary" />
2273
+ <WrenchScrewdriverIcon className="w-3.5 h-3.5 text-primary" />
1206
2274
  </div>
1207
2275
  <span className="font-semibold text-sm text-foreground">{toolCall.name}</span>
1208
2276
  </div>
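Note: WidgetRenderer is now wrapped in WidgetErrorBoundary so a crashing widget cannot unmount the whole chat view; the boundary component itself is defined elsewhere in the package. A minimal class-based boundary with the same role (names and fallback markup here are illustrative, not the package's implementation):

// Illustrative only - not nitrostack's WidgetErrorBoundary.
import React from 'react';

interface BoundaryState { hasError: boolean }

class WidgetErrorBoundarySketch extends React.Component<React.PropsWithChildren, BoundaryState> {
  state: BoundaryState = { hasError: false };

  static getDerivedStateFromError(): BoundaryState {
    return { hasError: true };
  }

  componentDidCatch(error: Error) {
    console.error('Widget render failed:', error); // log, but keep the rest of the chat usable
  }

  render() {
    if (this.state.hasError) {
      return <div className="text-xs text-muted-foreground p-3">Widget failed to render.</div>;
    }
    return this.props.children;
  }
}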
@@ -1210,7 +2278,7 @@ function ToolCallComponent({ toolCall, tools }: { toolCall: ToolCall; tools: Too
1210
2278
  onClick={() => setShowArgs(false)}
1211
2279
  className="w-6 h-6 rounded-md flex items-center justify-center hover:bg-muted transition-colors"
1212
2280
  >
1213
- <X className="w-4 h-4 text-muted-foreground" />
2281
+ <XMarkIcon className="h-4 w-4 text-muted-foreground" />
1214
2282
  </button>
1215
2283
  </div>
1216
2284
  <div>
@@ -1224,4 +2292,3 @@ function ToolCallComponent({ toolCall, tools }: { toolCall: ToolCall; tools: Too
1224
2292
  </div>
1225
2293
  );
1226
2294
  }
1227
-