otherwise-cli 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (81) hide show
  1. package/README.md +193 -0
  2. package/bin/otherwise.js +5 -0
  3. package/frontend/404.html +84 -0
  4. package/frontend/assets/OpenDyslexic3-Bold-CDyRs55Y.ttf +0 -0
  5. package/frontend/assets/OpenDyslexic3-Regular-CIBXa4WE.ttf +0 -0
  6. package/frontend/assets/__vite-browser-external-BIHI7g3E.js +1 -0
  7. package/frontend/assets/conversational-worker-CeKiciGk.js +2929 -0
  8. package/frontend/assets/dictation-worker-D0aYfq8b.js +29 -0
  9. package/frontend/assets/gemini-color-CgSQmmva.png +0 -0
  10. package/frontend/assets/index-BLux5ps4.js +21 -0
  11. package/frontend/assets/index-Blh8_TEM.js +5272 -0
  12. package/frontend/assets/index-BpQ1PuKu.js +18 -0
  13. package/frontend/assets/index-Df737c8w.css +1 -0
  14. package/frontend/assets/index-xaYHL6wb.js +113 -0
  15. package/frontend/assets/ort-wasm-simd-threaded.asyncify-BynIiDiv.wasm +0 -0
  16. package/frontend/assets/ort-wasm-simd-threaded.jsep-B0T3yYHD.wasm +0 -0
  17. package/frontend/assets/transformers-tULNc5V3.js +31 -0
  18. package/frontend/assets/tts-worker-DPJWqT7N.js +2899 -0
  19. package/frontend/assets/voice-mode-worker-GzvIE_uh.js +2927 -0
  20. package/frontend/assets/worker-2d5ABSLU.js +31 -0
  21. package/frontend/banner.png +0 -0
  22. package/frontend/favicon.svg +3 -0
  23. package/frontend/google55e5ec47ee14a5f8.html +1 -0
  24. package/frontend/index.html +234 -0
  25. package/frontend/manifest.json +17 -0
  26. package/frontend/pdf.worker.min.mjs +21 -0
  27. package/frontend/robots.txt +5 -0
  28. package/frontend/sitemap.xml +27 -0
  29. package/package.json +81 -0
  30. package/src/agent/index.js +1066 -0
  31. package/src/agent/location.js +51 -0
  32. package/src/agent/prompt.js +548 -0
  33. package/src/agent/tools.js +4372 -0
  34. package/src/browser/detect.js +68 -0
  35. package/src/browser/session.js +1109 -0
  36. package/src/config.js +137 -0
  37. package/src/email/client.js +503 -0
  38. package/src/index.js +557 -0
  39. package/src/inference/anthropic.js +113 -0
  40. package/src/inference/google.js +373 -0
  41. package/src/inference/index.js +81 -0
  42. package/src/inference/ollama.js +383 -0
  43. package/src/inference/openai.js +140 -0
  44. package/src/inference/openrouter.js +378 -0
  45. package/src/inference/xai.js +200 -0
  46. package/src/logBridge.js +9 -0
  47. package/src/models.js +146 -0
  48. package/src/remote/client.js +225 -0
  49. package/src/scheduler/cron.js +243 -0
  50. package/src/server.js +3876 -0
  51. package/src/storage/db.js +1135 -0
  52. package/src/storage/supabase.js +364 -0
  53. package/src/tunnel/cloudflare.js +241 -0
  54. package/src/ui/components/App.jsx +687 -0
  55. package/src/ui/components/BrowserSelect.jsx +111 -0
  56. package/src/ui/components/FilePicker.jsx +472 -0
  57. package/src/ui/components/Header.jsx +444 -0
  58. package/src/ui/components/HelpPanel.jsx +173 -0
  59. package/src/ui/components/HistoryPanel.jsx +158 -0
  60. package/src/ui/components/MessageList.jsx +235 -0
  61. package/src/ui/components/ModelSelector.jsx +304 -0
  62. package/src/ui/components/PromptInput.jsx +515 -0
  63. package/src/ui/components/StreamingResponse.jsx +134 -0
  64. package/src/ui/components/ThinkingIndicator.jsx +365 -0
  65. package/src/ui/components/ToolExecution.jsx +714 -0
  66. package/src/ui/components/index.js +82 -0
  67. package/src/ui/context/TerminalContext.jsx +150 -0
  68. package/src/ui/context/index.js +13 -0
  69. package/src/ui/hooks/index.js +16 -0
  70. package/src/ui/hooks/useChatState.js +675 -0
  71. package/src/ui/hooks/useCommands.js +280 -0
  72. package/src/ui/hooks/useFileAttachments.js +216 -0
  73. package/src/ui/hooks/useKeyboardShortcuts.js +173 -0
  74. package/src/ui/hooks/useNotifications.js +185 -0
  75. package/src/ui/hooks/useTerminalSize.js +151 -0
  76. package/src/ui/hooks/useWebSocket.js +273 -0
  77. package/src/ui/index.js +94 -0
  78. package/src/ui/ink-runner.js +22 -0
  79. package/src/ui/utils/formatters.js +424 -0
  80. package/src/ui/utils/index.js +6 -0
  81. package/src/ui/utils/markdown.js +166 -0
@@ -0,0 +1,378 @@
1
+ import OpenAI from 'openai';
2
+
3
// Module-level cache for the OpenRouter model catalog, shared by all callers
// in this process. `modelsCache` holds the last successfully fetched (and
// normalized) model array; `modelsCacheTime` is the epoch-ms timestamp of
// that fetch. Both are reset by clearOpenRouterModelsCache().
let modelsCache = null;
let modelsCacheTime = 0;
const CACHE_TTL = 5 * 60 * 1000; // 5 minutes
7
+
8
/**
 * Retrieve the OpenRouter model catalog, normalized for internal use.
 *
 * Results are memoized at module level for CACHE_TTL milliseconds. On a
 * failed refresh the previously cached list (or an empty array) is returned
 * rather than throwing, so callers always get an array.
 *
 * @param {string} apiKey - OpenRouter API key
 * @returns {Promise<Array>} Array of normalized model objects
 */
export async function fetchOpenRouterModels(apiKey) {
  const cacheIsFresh = modelsCache && Date.now() - modelsCacheTime < CACHE_TTL;
  if (cacheIsFresh) {
    return modelsCache;
  }

  try {
    const res = await fetch('https://openrouter.ai/api/v1/models', {
      headers: {
        'Authorization': `Bearer ${apiKey}`,
        'HTTP-Referer': 'https://otherwise.dev',
        'X-Title': 'Otherwise AI',
      },
    });

    if (!res.ok) {
      throw new Error(`Failed to fetch models: ${res.status}`);
    }

    const payload = await res.json();
    const rawModels = payload.data || [];

    // Normalize every catalog entry into the shape the rest of the app expects.
    const models = rawModels.map((model) => {
      const pricing = model.pricing || {};
      return {
        id: `openrouter:${model.id}`,
        name: model.name || model.id,
        provider: 'OpenRouter',
        contextLength: model.context_length || 4096,
        maxTokens: model.top_provider?.max_completion_tokens || model.context_length || 4096,
        pricing: {
          prompt: pricing.prompt || 0,
          completion: pricing.completion || 0,
          image: pricing.image || 0,
        },
        // Capability tags derived from the model's modality metadata.
        type: getModelType(model),
        // Keep the untouched upstream record for later inspection.
        _raw: model,
      };
    });

    modelsCache = models;
    modelsCacheTime = Date.now();

    return models;
  } catch (error) {
    console.error('[OpenRouter] Failed to fetch models:', error);
    // Degrade gracefully: stale data beats no data.
    return modelsCache || [];
  }
}
63
+
64
/**
 * Determine model type/capabilities from OpenRouter model data.
 *
 * Newer catalog entries carry explicit `architecture.input_modalities` /
 * `architecture.output_modalities` arrays; older entries only have the legacy
 * `architecture.modality` string of the form "text+image->text". The previous
 * implementation split that string on '+' alone, which produced entries like
 * "image->text" and therefore never matched 'image' — legacy entries were
 * silently tagged as text-only. We now split on '->' first.
 *
 * @param {object} model - OpenRouter model object
 * @returns {Array<string>} Array of type tags ('image-input', 'image-output', 'reasoning')
 */
function getModelType(model) {
  const types = [];

  const arch = model.architecture || {};

  // Parse the legacy "in1+in2->out1+out2" modality string as a fallback.
  const [legacyInput = '', legacyOutput = ''] = (arch.modality || '').split('->');
  const inputModality = arch.input_modalities || legacyInput.split('+');
  const outputModality = arch.output_modalities || legacyOutput.split('+');

  // Check for image input support
  if (inputModality.includes('image')) {
    types.push('image-input');
  }

  // Check for image output support
  if (outputModality.includes('image')) {
    types.push('image-output');
  }

  // Check for reasoning support based on model name patterns
  const modelId = model.id.toLowerCase();
  if (modelId.includes('reasoning') ||
      modelId.includes('think') ||
      modelId.includes('o1') ||
      modelId.includes('o3') ||
      modelId.includes('deepseek-r1') ||
      modelId.includes('qwq')) {
    types.push('reasoning');
  }

  return types;
}
99
+
100
// Substrings that identify image-generation models in the OpenRouter catalog.
const IMAGE_MODEL_MARKERS = ['dall-e', 'stable-diffusion', 'midjourney', 'imagen', 'flux'];

/**
 * Check if a model is an image generation model.
 * @param {string} model - Model identifier (with or without the openrouter: prefix)
 * @returns {boolean}
 */
export function isOpenRouterImageModel(model) {
  const id = model.replace('openrouter:', '').toLowerCase();
  return IMAGE_MODEL_MARKERS.some((marker) => id.includes(marker));
}
114
+
115
// Substrings that identify reasoning/thinking-capable models by ID.
const REASONING_MODEL_MARKERS = ['reasoning', 'think', '/o1', '/o3', 'deepseek-r1', 'qwq'];

/**
 * Check if a model supports reasoning/thinking content.
 * @param {string} model - Model identifier (with or without the openrouter: prefix)
 * @returns {boolean}
 */
export function isOpenRouterReasoningModel(model) {
  const id = model.replace('openrouter:', '').toLowerCase();
  return REASONING_MODEL_MARKERS.some((marker) => id.includes(marker));
}
129
+
130
/**
 * Stream chat completion from OpenRouter (OpenAI-compatible API).
 *
 * Image-generation models are detected up front and routed through
 * streamOpenRouterImage instead of the chat-completions stream.
 *
 * @param {string} model - Model identifier (with openrouter: prefix)
 * @param {Array} messages - Message objects ({ role, content, images? })
 * @param {string} systemPrompt - System prompt
 * @param {object} config - Configuration with API keys and settings
 * @yields {object} Chunks: { type: 'thinking'|'text'|'usage', ... }
 * @throws {Error} When the API key is missing or the request fails
 */
export async function* streamOpenRouter(model, messages, systemPrompt, config) {
  const apiKey = config.apiKeys?.openrouter;
  if (!apiKey) {
    throw new Error('OpenRouter API key not configured. Run: otherwise config set openrouter <key>');
  }

  // Strip the openrouter: prefix to get the actual model ID
  const actualModel = model.replace('openrouter:', '');

  // Image models go through a different flow (images endpoint or chat
  // completions that return image URLs/base64 in the response).
  if (isOpenRouterImageModel(model)) {
    yield* streamOpenRouterImage(actualModel, messages, config);
    return;
  }

  const client = new OpenAI({
    apiKey,
    baseURL: 'https://openrouter.ai/api/v1',
    defaultHeaders: {
      'HTTP-Referer': 'https://otherwise.dev',
      'X-Title': 'Otherwise AI',
    },
    timeout: 300000, // 5 minute timeout for long-running models
  });

  // Convert messages to OpenAI format, handling multimodal content.
  const openrouterMessages = [
    { role: 'system', content: systemPrompt },
    ...messages.map(m => {
      const role = m.role === 'user' ? 'user' : 'assistant';

      // User messages may carry images. Data URLs and plain URLs are both
      // passed through unchanged as image_url blocks (the previous code had
      // two identical branches for the two cases — merged here).
      if (m.images && m.images.length > 0 && m.role === 'user') {
        const content = m.images.map(imgDataUrl => ({
          type: 'image_url',
          image_url: { url: imgDataUrl },
        }));
        // Text content goes after the images.
        content.push({ type: 'text', text: m.content || '' });
        return { role, content };
      }

      // Regular text message
      return { role, content: m.content };
    }),
  ];

  // Check if this is a reasoning model
  const isReasoningModel = isOpenRouterReasoningModel(model);

  // Build request options
  const requestOptions = {
    model: actualModel,
    messages: openrouterMessages,
    max_tokens: config.maxTokens || 8192,
    stream: true,
  };

  // Only add temperature for non-reasoning models (reasoning models often
  // reject it). Use ?? rather than || so an explicit temperature of 0 —
  // a legitimate setting — is honored instead of silently becoming 0.7.
  if (!isReasoningModel) {
    requestOptions.temperature = config.temperature ?? 0.7;
  }

  try {
    const stream = await client.chat.completions.create(requestOptions);

    let usage = null;

    for await (const chunk of stream) {
      const delta = chunk.choices?.[0]?.delta;

      // Handle reasoning content if available (some models return this)
      if (delta?.reasoning_content) {
        yield { type: 'thinking', content: delta.reasoning_content };
      }

      // Handle regular content
      if (delta?.content) {
        yield { type: 'text', content: delta.content };
      }

      // Capture usage from final chunk (OpenRouter includes this with streaming)
      if (chunk.usage) {
        usage = chunk.usage;
      }
    }

    // Yield usage stats at the end
    if (usage) {
      yield {
        type: 'usage',
        inputTokens: usage.prompt_tokens || 0,
        outputTokens: usage.completion_tokens || 0,
        totalTokens: usage.total_tokens || 0,
      };
    }
  } catch (error) {
    // Translate common HTTP failures into actionable messages.
    let enhancedMessage = error.message;

    if (error.message?.includes('401') || error.message?.includes('Unauthorized')) {
      enhancedMessage = 'Invalid OpenRouter API key. Check your key at https://openrouter.ai/keys';
    } else if (error.message?.includes('402') || error.message?.includes('insufficient')) {
      enhancedMessage = 'Insufficient OpenRouter credits. Add credits at https://openrouter.ai/credits';
    } else if (error.message?.includes('429')) {
      enhancedMessage = 'OpenRouter rate limit exceeded. Please wait and try again.';
    } else if (error.message?.includes('model') && error.message?.includes('not found')) {
      enhancedMessage = `Model "${actualModel}" not available on OpenRouter. Check available models at https://openrouter.ai/models`;
    }

    const enhancedError = new Error(enhancedMessage);
    enhancedError.originalError = error;
    throw enhancedError;
  }
}
272
+
273
/**
 * Handle image generation through OpenRouter.
 *
 * DALL-E models use the images endpoint; other image models are tried via
 * chat completions (some return image data or URLs in the message content).
 * Callers (streamOpenRouter) validate the API key before calling this.
 *
 * @param {string} model - Model identifier (without prefix)
 * @param {Array} messages - Messages array
 * @param {object} config - Configuration
 * @yields {object} Chunks with type ('text' | 'image' | 'image_url') and content
 * @throws {Error} When the images endpoint returns no usable image data
 */
async function* streamOpenRouterImage(model, messages, config) {
  const apiKey = config.apiKeys?.openrouter;

  const client = new OpenAI({
    apiKey,
    baseURL: 'https://openrouter.ai/api/v1',
    defaultHeaders: {
      'HTTP-Referer': 'https://otherwise.dev',
      'X-Title': 'Otherwise AI',
    },
    timeout: 120000, // 2 minute timeout for image generation
  });

  // Get the last user message as the prompt
  const lastUserMessage = [...messages].reverse().find(m => m.role === 'user');
  const prompt = lastUserMessage?.content || 'Generate an image';

  try {
    // For DALL-E models through OpenRouter, use the images endpoint
    if (model.includes('dall-e')) {
      const response = await client.images.generate({
        model,
        prompt,
        n: 1,
        response_format: 'b64_json',
      });

      const imageData = response.data?.[0];

      if (imageData?.b64_json) {
        yield { type: 'text', content: 'Here\'s the generated image:\n\n' };
        yield {
          type: 'image',
          content: imageData.b64_json,
          mimeType: 'image/png',
          revisedPrompt: imageData.revised_prompt || prompt,
        };
      } else if (imageData?.url) {
        yield { type: 'text', content: 'Here\'s the generated image:\n\n' };
        yield {
          type: 'image_url',
          content: imageData.url,
          revisedPrompt: imageData.revised_prompt || prompt,
        };
      } else {
        // Previously this fell through silently and the generator finished
        // without yielding anything; fail loudly instead (matches the xAI
        // provider's behavior).
        throw new Error('No image data returned from OpenRouter');
      }
    } else {
      // For other image models, try chat completions (some return images this way)
      const response = await client.chat.completions.create({
        model,
        messages: [
          { role: 'system', content: 'You are an image generation assistant.' },
          { role: 'user', content: prompt },
        ],
        max_tokens: 4096,
      });

      const content = response.choices?.[0]?.message?.content || '';

      // Check if response contains image data or URL
      if (content.includes('data:image')) {
        // Extract the first base64 data URL embedded in the text.
        const match = content.match(/data:image\/[^;]+;base64,[^"'\s]+/);
        if (match) {
          yield { type: 'text', content: 'Here\'s the generated image:\n\n' };
          yield {
            type: 'image',
            content: match[0].split(',')[1],
            mimeType: match[0].split(';')[0].replace('data:', ''),
          };
        } else {
          yield { type: 'text', content };
        }
      } else {
        // Return as text (might be a description or URL)
        yield { type: 'text', content };
      }
    }
  } catch (error) {
    console.error('[OpenRouter Image] Generation error:', error);
    throw error;
  }
}
363
+
364
/**
 * Drop the memoized OpenRouter model catalog so the next call to
 * fetchOpenRouterModels() hits the network again (useful for forcing
 * a refresh before the CACHE_TTL window expires).
 */
export function clearOpenRouterModelsCache() {
  modelsCache = null;
  modelsCacheTime = 0;
}
371
+
372
// Aggregate default export mirroring the named exports, for consumers that
// import the provider module as a whole.
export default {
  streamOpenRouter,
  fetchOpenRouterModels,
  isOpenRouterImageModel,
  isOpenRouterReasoningModel,
  clearOpenRouterModelsCache,
};
@@ -0,0 +1,200 @@
1
+ import OpenAI from 'openai';
2
+
3
/**
 * Check if a model exposes reasoning content in its stream deltas.
 * Per the xAI docs: only grok-3-mini returns reasoning_content; grok-3,
 * grok-4, and grok-4-fast-reasoning use encrypted reasoning that is not
 * directly accessible.
 */
const supportsReasoningContent = (model) => model === 'grok-3-mini';
11
+
12
/**
 * Check if a model is a reasoning model (used to decide request options,
 * e.g. whether to send a temperature parameter).
 */
function isReasoningModel(model) {
  const reasoningIds = new Set(['grok-3-mini', 'grok-3', 'grok-4']);
  return reasoningIds.has(model) || model.includes('reasoning');
}
21
+
22
// The only xAI image-generation model this client recognizes.
const GROK_IMAGE_MODEL = 'grok-2-image';

/**
 * Check if a model is an image generation model.
 */
export function isGrokImageModel(model) {
  return model === GROK_IMAGE_MODEL;
}
28
+
29
/**
 * Generate an image using xAI Grok image model.
 * Uses the /v1/images/generations endpoint (NOT chat completions).
 *
 * @param {string} model - Model identifier (e.g., 'grok-2-image')
 * @param {string} prompt - Text prompt for image generation
 * @param {object} config - Configuration with API keys and settings
 * @yields {object} - Chunks with type and content (image as base64, or URL fallback)
 * @throws {Error} When the API key is missing or no image data comes back
 */
export async function* generateGrokImage(model, prompt, config) {
  const apiKey = config.apiKeys?.xai;
  if (!apiKey) {
    throw new Error('xAI API key not configured. Run: otherwise config set xai <key>');
  }

  console.log('[xAI Image] Starting image generation with model:', model);
  // Only append the ellipsis when the prompt was actually truncated
  // (previously '...' was added unconditionally, even for short prompts).
  const promptPreview = prompt.length > 100 ? prompt.substring(0, 100) + '...' : prompt;
  console.log('[xAI Image] Prompt:', promptPreview);

  const client = new OpenAI({
    apiKey,
    baseURL: 'https://api.x.ai/v1',
    timeout: 120000, // 2 minute timeout for image generation
  });

  try {
    // Use the images.generate endpoint for image models
    console.log('[xAI Image] Calling images.generate endpoint...');
    const response = await client.images.generate({
      model,
      prompt,
      n: 1,
      response_format: 'b64_json', // Get base64 data directly
    });

    console.log('[xAI Image] Response received');
    console.log('[xAI Image] Data array length:', response.data?.length);

    // Extract the image data
    const imageData = response.data?.[0];

    if (imageData?.b64_json) {
      console.log('[xAI Image] Got b64_json, length:', imageData.b64_json.length);
      // Yield a brief text response
      yield { type: 'text', content: 'Here\'s the generated image:\n\n' };

      // Yield the image as base64
      yield {
        type: 'image',
        content: imageData.b64_json,
        mimeType: 'image/jpeg', // Grok generates JPG format
        revisedPrompt: imageData.revised_prompt || prompt,
      };
    } else if (imageData?.url) {
      console.log('[xAI Image] Got URL:', imageData.url.substring(0, 50));
      // Fallback to URL if b64_json not available
      yield { type: 'text', content: 'Here\'s the generated image:\n\n' };
      yield {
        type: 'image_url',
        content: imageData.url,
        revisedPrompt: imageData.revised_prompt || prompt,
      };
    } else {
      console.error('[xAI Image] No image data in response:', JSON.stringify(response).substring(0, 500));
      throw new Error('No image data returned from xAI');
    }
  } catch (error) {
    console.error('[xAI Image] Generation error:', error);
    throw error;
  }
}
98
+
99
/**
 * Stream chat completion from xAI Grok (uses OpenAI-compatible API).
 *
 * Image models are routed to generateGrokImage using the last user message
 * as the prompt.
 *
 * @param {string} model - Model identifier (e.g., 'grok-4-fast-reasoning')
 * @param {Array} messages - Message objects ({ role, content, images? })
 * @param {string} systemPrompt - System prompt
 * @param {object} config - Configuration with API keys and settings
 * @yields {object} Chunks: { type: 'thinking'|'text'|'usage', ... }
 * @throws {Error} When the API key is missing
 */
export async function* streamGrok(model, messages, systemPrompt, config) {
  const apiKey = config.apiKeys?.xai;
  if (!apiKey) {
    throw new Error('xAI API key not configured. Run: otherwise config set xai <key>');
  }

  // Route image models to the image generation function
  if (isGrokImageModel(model)) {
    // For image generation, use the last user message as the prompt
    const lastUserMessage = [...messages].reverse().find(m => m.role === 'user');
    const prompt = lastUserMessage?.content || 'Generate an image';
    yield* generateGrokImage(model, prompt, config);
    return;
  }

  const client = new OpenAI({
    apiKey,
    baseURL: 'https://api.x.ai/v1',
    timeout: 360000, // 6 minute timeout for reasoning models
  });

  // Convert messages to OpenAI format (xAI uses same format; vision: user messages can have images)
  const xaiMessages = [
    { role: 'system', content: systemPrompt },
    ...messages.map(m => {
      const role = m.role === 'user' ? 'user' : 'assistant';
      if (m.images && m.images.length > 0 && m.role === 'user') {
        const imageBlocks = m.images.map((imgDataUrl) => ({
          type: 'image_url',
          image_url: { url: imgDataUrl },
        }));
        return {
          role,
          content: [...imageBlocks, { type: 'text', text: m.content || '' }],
        };
      }
      return { role, content: m.content };
    }),
  ];

  // Build request options
  const requestOptions = {
    model,
    messages: xaiMessages,
    max_completion_tokens: config.maxTokens || 8192,
    stream: true,
    stream_options: { include_usage: true }, // Get real token counts
  };

  // Only add temperature for non-reasoning models (reasoning models don't
  // support it per docs). Use ?? rather than || so an explicit temperature
  // of 0 is honored instead of silently becoming 0.7.
  if (!isReasoningModel(model)) {
    requestOptions.temperature = config.temperature ?? 0.7;
  }

  // For grok-3-mini, add reasoning_effort if desired
  if (model === 'grok-3-mini') {
    requestOptions.reasoning_effort = config.reasoningEffort || 'high';
  }

  const stream = await client.chat.completions.create(requestOptions);

  let usage = null;

  for await (const chunk of stream) {
    const delta = chunk.choices?.[0]?.delta;

    // Handle reasoning content (grok-3-mini returns this)
    if (supportsReasoningContent(model) && delta?.reasoning_content) {
      yield { type: 'thinking', content: delta.reasoning_content };
    }

    // Handle regular content
    if (delta?.content) {
      yield { type: 'text', content: delta.content };
    }

    // Capture usage from final chunk
    if (chunk.usage) {
      usage = chunk.usage;
    }
  }

  // Yield usage stats at the end
  if (usage) {
    yield {
      type: 'usage',
      inputTokens: usage.prompt_tokens || 0,
      outputTokens: usage.completion_tokens || 0,
      totalTokens: usage.total_tokens || 0,
    };
  }
}
199
+
200
// Aggregate default export mirroring the named exports, for consumers that
// import the provider module as a whole.
export default { streamGrok, generateGrokImage, isGrokImageModel };
@@ -0,0 +1,9 @@
1
/**
 * Log bridge: lets any part of the process forward log lines to the server
 * (e.g. console.log from tools.js). server.js replaces `write` with a
 * function that broadcasts each line to connected WebSocket clients.
 */
export const logSink = {
  // Default sink discards everything until the server installs a real writer.
  write(_level, _args) {},
};