lemonade-sdk 8.1.2__py3-none-any.whl → 8.1.3__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.

This version of lemonade-sdk has been flagged as potentially problematic.

@@ -0,0 +1,735 @@
1
+ // Chat logic and functionality
2
+ let messages = [];
3
+ let attachedFiles = [];
4
+
5
+ // Default model configuration
6
+ const DEFAULT_MODEL = 'Qwen2.5-0.5B-Instruct-CPU';
7
+
8
+ // Get DOM elements
9
+ let chatHistory, chatInput, sendBtn, attachmentBtn, fileAttachment, attachmentsPreviewContainer, attachmentsPreviewRow, modelSelect;
10
+
11
+ // Initialize chat functionality when DOM is loaded
12
+ document.addEventListener('DOMContentLoaded', function() {
13
+ chatHistory = document.getElementById('chat-history');
14
+ chatInput = document.getElementById('chat-input');
15
+ sendBtn = document.getElementById('send-btn');
16
+ attachmentBtn = document.getElementById('attachment-btn');
17
+ fileAttachment = document.getElementById('file-attachment');
18
+ attachmentsPreviewContainer = document.getElementById('attachments-preview-container');
19
+ attachmentsPreviewRow = document.getElementById('attachments-preview-row');
20
+ modelSelect = document.getElementById('model-select');
21
+
22
+ // Set up event listeners
23
+ setupChatEventListeners();
24
+
25
+ // Initialize model dropdown (will be populated when models.js calls updateModelStatusIndicator)
26
+ initializeModelDropdown();
27
+
28
+ // Update attachment button state periodically
29
+ updateAttachmentButtonState();
30
+ setInterval(updateAttachmentButtonState, 1000);
31
+ });
32
+
33
+ function setupChatEventListeners() {
37
+ // Attachment button click
38
+ attachmentBtn.onclick = () => {
39
+ if (!currentLoadedModel) {
40
+ alert('Please load a model first before attaching images.');
41
+ return;
42
+ }
43
+ if (!isVisionModel(currentLoadedModel)) {
44
+ alert(`The current model "${currentLoadedModel}" does not support image inputs. Please load a model with "Vision" capabilities to attach images.`);
45
+ return;
46
+ }
47
+ fileAttachment.click();
48
+ };
49
+
50
+ // File input change
51
+ fileAttachment.addEventListener('change', handleFileSelection);
52
+
53
+ // Chat input events
54
+ chatInput.addEventListener('keydown', handleChatInputKeydown);
55
+ chatInput.addEventListener('paste', handleChatInputPaste);
56
+
57
+ // Model select change
58
+ modelSelect.addEventListener('change', handleModelSelectChange);
59
+
60
+ // Send button click
61
+ sendBtn.addEventListener('click', function() {
62
+ // Check if we have a loaded model
63
+ if (currentLoadedModel && modelSelect.value !== '' && !modelSelect.disabled) {
64
+ sendMessage();
65
+ } else if (!currentLoadedModel) {
66
+ // Auto-load default model and send
67
+ autoLoadDefaultModelAndSend();
68
+ }
69
+ });
70
+ }
71
+
72
+ // Initialize model dropdown with available models
73
+ function initializeModelDropdown() {
74
+ const allModels = window.SERVER_MODELS || {};
75
+
76
+ // Clear existing options except the first one
77
+ modelSelect.innerHTML = '<option value="">Pick a model</option>';
78
+
79
+ // Add only installed models to dropdown
80
+ Object.keys(allModels).forEach(modelId => {
81
+ // Only add if the model is installed
82
+ if (window.installedModels && window.installedModels.has(modelId)) {
83
+ const option = document.createElement('option');
84
+ option.value = modelId;
85
+ option.textContent = modelId;
86
+ modelSelect.appendChild(option);
87
+ }
88
+ });
89
+
90
+ // Set current selection based on loaded model
91
+ updateModelSelectValue();
92
+ }
93
+
94
+ // Make dropdown initialization accessible globally so models.js can refresh it
95
+ window.initializeModelDropdown = initializeModelDropdown;
96
+
97
+ // Update model select value to match currently loaded model
98
+ function updateModelSelectValue() {
99
+ if (currentLoadedModel) {
100
+ modelSelect.value = currentLoadedModel;
101
+ } else {
102
+ modelSelect.value = '';
103
+ }
104
+ }
105
+
106
+ // Make updateModelSelectValue accessible globally
107
+ window.updateModelSelectValue = updateModelSelectValue;
108
+
109
+ // Handle model selection change
110
+ async function handleModelSelectChange() {
111
+ const selectedModel = modelSelect.value;
112
+
113
+ if (!selectedModel) {
114
+ return; // "Pick a model" selected
115
+ }
116
+
117
+ if (selectedModel === currentLoadedModel) {
118
+ return; // Same model already loaded
119
+ }
120
+
121
+ // Use the standardized load function
122
+ await loadModelStandardized(selectedModel, {
123
+ onLoadingStart: (modelId) => {
124
+ // Update dropdown to show loading state with model name
125
+ const loadingOption = modelSelect.querySelector('option[value=""]');
126
+ if (loadingOption) {
127
+ loadingOption.textContent = `Loading ${modelId}...`;
128
+ }
129
+ },
130
+ onLoadingEnd: (modelId, success) => {
131
+ // Reset the default option text
132
+ const defaultOption = modelSelect.querySelector('option[value=""]');
133
+ if (defaultOption) {
134
+ defaultOption.textContent = 'Pick a model';
135
+ }
136
+ },
137
+ onSuccess: (loadedModelId) => {
138
+ // Update attachment button state for new model
139
+ updateAttachmentButtonState();
140
+ },
141
+ onError: (error, failedModelId) => {
142
+ // Reset dropdown to previous value on error
143
+ updateModelSelectValue();
144
+ }
145
+ });
146
+ }
147
+
148
+ // Update attachment button state based on current model
149
+ function updateAttachmentButtonState() {
150
+ // Update model dropdown selection
151
+ updateModelSelectValue();
152
+
153
+ // Update send button state based on model loading
154
+ if (modelSelect.disabled) {
155
+ sendBtn.disabled = true;
156
+ sendBtn.textContent = 'Loading...';
157
+ } else {
158
+ sendBtn.disabled = false;
159
+ sendBtn.textContent = 'Send';
160
+ }
161
+
162
+ if (!currentLoadedModel) {
163
+ attachmentBtn.style.opacity = '0.5';
164
+ attachmentBtn.style.cursor = 'not-allowed';
165
+ attachmentBtn.title = 'Load a model first';
166
+ return;
167
+ }
168
+
169
+ const isVision = isVisionModel(currentLoadedModel);
170
+
171
+ if (isVision) {
172
+ attachmentBtn.style.opacity = '1';
173
+ attachmentBtn.style.cursor = 'pointer';
174
+ attachmentBtn.title = 'Attach images';
175
+ } else {
176
+ attachmentBtn.style.opacity = '0.5';
177
+ attachmentBtn.style.cursor = 'not-allowed';
178
+ attachmentBtn.title = 'Image attachments not supported by this model';
179
+ }
180
+ }
181
+
182
+ // Make updateAttachmentButtonState accessible globally
183
+ window.updateAttachmentButtonState = updateAttachmentButtonState;
184
+
185
+ // Auto-load default model and send message
186
+ async function autoLoadDefaultModelAndSend() {
187
+ // Check if default model is available and installed
188
+ if (!window.SERVER_MODELS || !window.SERVER_MODELS[DEFAULT_MODEL]) {
189
+ showErrorBanner('No models available. Please install a model first.');
190
+ return;
191
+ }
192
+
193
+ if (!window.installedModels || !window.installedModels.has(DEFAULT_MODEL)) {
194
+ showErrorBanner('Default model is not installed. Please install it from the Model Management tab.');
195
+ return;
196
+ }
197
+
198
+ // Store the message to send after loading
199
+ const messageToSend = chatInput.value.trim();
200
+ if (!messageToSend && attachedFiles.length === 0) {
201
+ return; // Nothing to send
202
+ }
203
+
204
+ // Use the standardized load function
205
+ const success = await loadModelStandardized(DEFAULT_MODEL, {
206
+ onLoadingStart: (modelId) => {
207
+ // Custom UI updates for auto-loading
208
+ sendBtn.textContent = 'Loading model...';
209
+ },
210
+ onLoadingEnd: (modelId, loadSuccess) => {
211
+ // Reset send button text
212
+ sendBtn.textContent = 'Send';
213
+ },
214
+ onSuccess: (loadedModelId) => {
215
+ // Send the message after successful load
216
+ sendMessage(messageToSend);
217
+ },
218
+ onError: (error, failedModelId) => {
219
+ console.error('Error auto-loading default model:', error);
220
+ }
221
+ });
222
+ }
223
+
224
+ // Check if model supports vision and update attachment button
225
+ function checkCurrentModel() {
226
+ if (attachedFiles.length > 0 && currentLoadedModel && !isVisionModel(currentLoadedModel)) {
227
+ if (confirm(`The current model "${currentLoadedModel}" does not support images. Would you like to remove the attached images?`)) {
228
+ clearAttachments();
229
+ }
230
+ }
231
+ updateAttachmentButtonState();
232
+ }
233
+
234
+ // Handle file selection
235
+ function handleFileSelection() {
236
+ if (fileAttachment.files.length > 0) {
237
+ // Check if current model supports vision
238
+ if (!currentLoadedModel) {
239
+ alert('Please load a model first before attaching images.');
240
+ fileAttachment.value = ''; // Clear the input
241
+ return;
242
+ }
243
+ if (!isVisionModel(currentLoadedModel)) {
244
+ alert(`The current model "${currentLoadedModel}" does not support image inputs. Please load a model with "Vision" capabilities.`);
245
+ fileAttachment.value = ''; // Clear the input
246
+ return;
247
+ }
248
+
249
+ // Filter only image files
250
+ const imageFiles = Array.from(fileAttachment.files).filter(file => {
251
+ if (!file.type.startsWith('image/')) {
252
+ console.warn(`Skipping non-image file: ${file.name} (${file.type})`);
253
+ return false;
254
+ }
255
+ return true;
256
+ });
257
+
258
+ if (imageFiles.length === 0) {
259
+ alert('Please select only image files (PNG, JPG, GIF, etc.)');
260
+ fileAttachment.value = ''; // Clear the input
261
+ return;
262
+ }
263
+
264
+ if (imageFiles.length !== fileAttachment.files.length) {
265
+ alert(`${fileAttachment.files.length - imageFiles.length} non-image file(s) were skipped. Only image files are supported.`);
266
+ }
267
+
268
+ attachedFiles = imageFiles;
269
+ updateInputPlaceholder();
270
+ updateAttachmentPreviewVisibility();
271
+ updateAttachmentPreviews();
272
+ }
273
+ }
274
+
275
+ // Handle chat input keydown events
276
+ function handleChatInputKeydown(e) {
277
+ if (e.key === 'Escape' && attachedFiles.length > 0) {
278
+ e.preventDefault();
279
+ clearAttachments();
280
+ } else if (e.key === 'Enter') {
281
+ // Check if we have a loaded model
282
+ if (currentLoadedModel && modelSelect.value !== '' && !modelSelect.disabled) {
283
+ sendMessage();
284
+ } else if (!currentLoadedModel) {
285
+ // Auto-load default model and send
286
+ autoLoadDefaultModelAndSend();
287
+ }
288
+ }
289
+ }
290
+
291
+ // Handle paste events for images
292
+ async function handleChatInputPaste(e) {
293
+ e.preventDefault();
294
+
295
+ const clipboardData = e.clipboardData || window.clipboardData;
296
+ const items = clipboardData.items;
297
+ let hasImage = false;
298
+ let pastedText = '';
299
+
300
+ // Check for text content first
301
+ for (let item of items) {
302
+ if (item.type === 'text/plain') {
303
+ pastedText = clipboardData.getData('text/plain');
304
+ }
305
+ }
306
+
307
+ // Check for images
308
+ for (let item of items) {
309
+ if (item.type.indexOf('image') !== -1) {
310
+ hasImage = true;
311
+ const file = item.getAsFile();
312
+ if (file && file.type.startsWith('image/')) {
313
+ // Check if current model supports vision before adding image
314
+ const currentModel = modelSelect.value;
315
+ if (!isVisionModel(currentModel)) {
316
+ alert(`The selected model "${currentModel}" does not support image inputs. Please select a model with "Vision" capabilities to paste images.`);
317
+ // Only paste text, skip the image
318
+ if (pastedText) {
319
+ chatInput.value = pastedText;
320
+ }
321
+ return;
322
+ }
323
+ // Add to attachedFiles array only if it's an image and model supports vision
324
+ attachedFiles.push(file);
325
+ } else if (file) {
326
+ console.warn(`Skipping non-image pasted file: ${file.name || 'unknown'} (${file.type})`);
327
+ }
328
+ }
329
+ }
330
+
331
+ // Update input box content - only show text, images will be indicated separately
332
+ if (pastedText) {
333
+ chatInput.value = pastedText;
334
+ }
335
+
336
+ // Update placeholder to show attached images
337
+ updateInputPlaceholder();
338
+ updateAttachmentPreviewVisibility();
339
+ updateAttachmentPreviews();
340
+ }
341
+
342
+ function clearAttachments() {
343
+ attachedFiles = [];
344
+ fileAttachment.value = '';
345
+ updateInputPlaceholder();
346
+ updateAttachmentPreviewVisibility();
347
+ updateAttachmentPreviews();
348
+ }
349
+
350
+ function updateAttachmentPreviewVisibility() {
351
+ if (attachedFiles.length > 0) {
352
+ attachmentsPreviewContainer.classList.add('has-attachments');
353
+ } else {
354
+ attachmentsPreviewContainer.classList.remove('has-attachments');
355
+ }
356
+ }
357
+
358
+ function updateAttachmentPreviews() {
359
+ // Clear existing previews
360
+ attachmentsPreviewRow.innerHTML = '';
361
+
362
+ if (attachedFiles.length === 0) {
363
+ return;
364
+ }
365
+
366
+ attachedFiles.forEach((file, index) => {
367
+ // Skip non-image files (extra safety check)
368
+ if (!file.type.startsWith('image/')) {
369
+ console.warn(`Skipping non-image file in preview: ${file.name} (${file.type})`);
370
+ return;
371
+ }
372
+
373
+ const previewDiv = document.createElement('div');
374
+ previewDiv.className = 'attachment-preview';
375
+
376
+ // Create thumbnail
377
+ const thumbnail = document.createElement('img');
378
+ thumbnail.className = 'attachment-thumbnail';
379
+ thumbnail.alt = file.name;
380
+
381
+ // Create filename display
382
+ const filename = document.createElement('div');
383
+ filename.className = 'attachment-filename';
384
+ filename.textContent = file.name || `pasted-image-${index + 1}`;
385
+ filename.title = file.name || `pasted-image-${index + 1}`;
386
+
387
+ // Create remove button
388
+ const removeBtn = document.createElement('button');
389
+ removeBtn.className = 'attachment-remove-btn';
390
+ removeBtn.innerHTML = '✕';
391
+ removeBtn.title = 'Remove this image';
392
+ removeBtn.onclick = () => removeAttachment(index);
393
+
394
+ // Generate thumbnail for image
395
+ const reader = new FileReader();
396
+ reader.onload = (e) => {
397
+ thumbnail.src = e.target.result;
398
+ };
399
+ reader.readAsDataURL(file);
400
+
401
+ previewDiv.appendChild(thumbnail);
402
+ previewDiv.appendChild(filename);
403
+ previewDiv.appendChild(removeBtn);
404
+ attachmentsPreviewRow.appendChild(previewDiv);
405
+ });
406
+ }
407
+
408
+ function removeAttachment(index) {
409
+ attachedFiles.splice(index, 1);
410
+ updateInputPlaceholder();
411
+ updateAttachmentPreviewVisibility();
412
+ updateAttachmentPreviews();
413
+ }
414
+
415
+ // Function to update input placeholder to show attached files
416
+ function updateInputPlaceholder() {
417
+ if (attachedFiles.length > 0) {
418
+ chatInput.placeholder = `Type your message... (${attachedFiles.length} image${attachedFiles.length > 1 ? 's' : ''} attached)`;
419
+ } else {
420
+ chatInput.placeholder = 'Type your message...';
421
+ }
422
+ }
423
+
424
+ // Function to convert file to base64
425
+ function fileToBase64(file) {
426
+ return new Promise((resolve, reject) => {
427
+ const reader = new FileReader();
428
+ reader.readAsDataURL(file);
429
+ reader.onload = () => resolve(reader.result.split(',')[1]); // Remove data:image/...;base64, prefix
430
+ reader.onerror = error => reject(error);
431
+ });
432
+ }
433
+
434
+ function appendMessage(role, text, isMarkdown = false) {
435
+ const div = document.createElement('div');
436
+ div.className = 'chat-message ' + role;
437
+ // Add a bubble for iMessage style
438
+ const bubble = document.createElement('div');
439
+ bubble.className = 'chat-bubble ' + role;
440
+
441
+ if (role === 'llm' && isMarkdown) {
442
+ bubble.innerHTML = renderMarkdownWithThinkTokens(text);
443
+ } else {
444
+ bubble.textContent = text;
445
+ }
446
+
447
+ div.appendChild(bubble);
448
+ chatHistory.appendChild(div);
449
+ chatHistory.scrollTop = chatHistory.scrollHeight;
450
+ return bubble; // Return the bubble element for streaming updates
451
+ }
452
+
453
+ function updateMessageContent(bubbleElement, text, isMarkdown = false) {
454
+ if (isMarkdown) {
455
+ bubbleElement.innerHTML = renderMarkdownWithThinkTokens(text);
456
+ } else {
457
+ bubbleElement.textContent = text;
458
+ }
459
+ }
460
+
461
+ function renderMarkdownWithThinkTokens(text) {
462
+ // Check if text contains opening think tag
463
+ if (text.includes('<think>')) {
464
+ if (text.includes('</think>')) {
465
+ // Complete think block - handle as before
466
+ const thinkMatch = text.match(/<think>(.*?)<\/think>/s);
467
+ if (thinkMatch) {
468
+ const thinkContent = thinkMatch[1].trim();
469
+ const mainResponse = text.replace(/<think>.*?<\/think>/s, '').trim();
470
+
471
+ // Create collapsible structure
472
+ let html = '';
473
+ if (thinkContent) {
474
+ html += `
475
+ <div class="think-tokens-container">
476
+ <div class="think-tokens-header" onclick="toggleThinkTokens(this)">
477
+ <span class="think-tokens-chevron">▼</span>
478
+ <span class="think-tokens-label">Thinking...</span>
479
+ </div>
480
+ <div class="think-tokens-content">
481
+ ${renderMarkdown(thinkContent)}
482
+ </div>
483
+ </div>
484
+ `;
485
+ }
486
+ if (mainResponse) {
487
+ html += `<div class="main-response">${renderMarkdown(mainResponse)}</div>`;
488
+ }
489
+ return html;
490
+ }
491
+ } else {
492
+ // Partial think block - only opening tag found, still being generated
493
+ const thinkMatch = text.match(/<think>(.*)/s);
494
+ if (thinkMatch) {
495
+ const thinkContent = thinkMatch[1];
496
+ const beforeThink = text.substring(0, text.indexOf('<think>'));
497
+
498
+ let html = '';
499
+ if (beforeThink.trim()) {
500
+ html += `<div class="main-response">${renderMarkdown(beforeThink)}</div>`;
501
+ }
502
+
503
+ html += `
504
+ <div class="think-tokens-container">
505
+ <div class="think-tokens-header" onclick="toggleThinkTokens(this)">
506
+ <span class="think-tokens-chevron">▼</span>
507
+ <span class="think-tokens-label">Thinking...</span>
508
+ </div>
509
+ <div class="think-tokens-content">
510
+ ${renderMarkdown(thinkContent)}
511
+ </div>
512
+ </div>
513
+ `;
514
+
515
+ return html;
516
+ }
517
+ }
518
+ }
519
+
520
+ // Fallback to normal markdown rendering
521
+ return renderMarkdown(text);
522
+ }
523
+
524
+ function toggleThinkTokens(header) {
525
+ const container = header.parentElement;
526
+ const content = container.querySelector('.think-tokens-content');
527
+ const chevron = header.querySelector('.think-tokens-chevron');
528
+
529
+ if (content.style.display === 'none') {
530
+ content.style.display = 'block';
531
+ chevron.textContent = '▼';
532
+ container.classList.remove('collapsed');
533
+ } else {
534
+ content.style.display = 'none';
535
+ chevron.textContent = '▶';
536
+ container.classList.add('collapsed');
537
+ }
538
+ }
539
+
540
+ async function sendMessage() {
541
+ const text = chatInput.value.trim();
542
+ if (!text && attachedFiles.length === 0) return;
543
+
544
+ // Check if a model is loaded, if not, automatically load the default model
545
+ if (!currentLoadedModel) {
546
+ const allModels = window.SERVER_MODELS || {};
547
+
548
+ if (allModels[DEFAULT_MODEL]) {
549
+ try {
550
+ // Show loading message
551
+ const loadingBubble = appendMessage('system', 'Loading default model, please wait...');
552
+
553
+ // Load the default model
554
+ await httpRequest(getServerBaseUrl() + '/api/v1/load', {
555
+ method: 'POST',
556
+ headers: { 'Content-Type': 'application/json' },
557
+ body: JSON.stringify({ model_name: DEFAULT_MODEL })
558
+ });
559
+
560
+ // Update model status
561
+ await updateModelStatusIndicator();
562
+
563
+ // Remove loading message
564
+ loadingBubble.parentElement.remove();
565
+
566
+ // Show success message briefly
567
+ const successBubble = appendMessage('system', `Loaded ${DEFAULT_MODEL} successfully!`);
568
+ setTimeout(() => {
569
+ successBubble.parentElement.remove();
570
+ }, 2000);
571
+
572
+ } catch (error) {
573
+ console.error('Failed to auto-load the default model:', error);
+ alert('Failed to load the default model. Please load a model before sending messages.');
574
+ return;
575
+ }
576
+ } else {
577
+ alert('Please load a model first before sending messages.');
578
+ return;
579
+ }
580
+ }
581
+
582
+ // Check if trying to send images to non-vision model
583
+ if (attachedFiles.length > 0) {
584
+ if (!isVisionModel(currentLoadedModel)) {
585
+ alert(`Cannot send images to model "${currentLoadedModel}" as it does not support vision. Please load a model with "Vision" capabilities or remove the attached images.`);
586
+ return;
587
+ }
588
+ }
589
+
590
+ // Create message content
591
+ let messageContent = [];
592
+
593
+ // Add text if present
594
+ if (text) {
595
+ messageContent.push({
596
+ type: "text",
597
+ text: text
598
+ });
599
+ }
600
+
601
+ // Add images if present
602
+ if (attachedFiles.length > 0) {
603
+ for (const file of attachedFiles) {
604
+ if (file.type.startsWith('image/')) {
605
+ try {
606
+ const base64 = await fileToBase64(file);
607
+ messageContent.push({
608
+ type: "image_url",
609
+ image_url: {
610
+ url: `data:${file.type};base64,${base64}`
611
+ }
612
+ });
613
+ } catch (error) {
614
+ console.error('Error converting image to base64:', error);
615
+ }
616
+ }
617
+ }
618
+ }
619
+
620
+ // Display user message (show text and file names)
621
+ let displayText = text;
622
+ if (attachedFiles.length > 0) {
623
+ const fileNames = attachedFiles.map(f => f.name || 'pasted-image').join(', ');
624
+ displayText = displayText ? `${displayText}\n[Images: ${fileNames}]` : `[Images: ${fileNames}]`;
625
+ }
626
+
627
+ appendMessage('user', displayText);
628
+
629
+ // Add to messages array
630
+ const userMessage = {
631
+ role: 'user',
632
+ content: messageContent.length === 1 && messageContent[0].type === "text"
633
+ ? messageContent[0].text
634
+ : messageContent
635
+ };
636
+ messages.push(userMessage);
637
+
638
+ // Clear input and attachments
639
+ chatInput.value = '';
640
+ attachedFiles = [];
641
+ fileAttachment.value = '';
642
+ updateInputPlaceholder(); // Reset placeholder
643
+ updateAttachmentPreviewVisibility(); // Hide preview container
644
+ updateAttachmentPreviews(); // Clear previews
645
+ sendBtn.disabled = true;
646
+
647
+ // Streaming OpenAI completions (placeholder, adapt as needed)
648
+ let llmText = '';
+ let payload; // declared before the try block so the catch handler below can reference it
649
+ const llmBubble = appendMessage('llm', '...');
650
+ try {
651
+ // Use the correct endpoint for chat completions with model settings
652
+ const modelSettings = getCurrentModelSettings ? getCurrentModelSettings() : {};
653
+ console.log('Applying model settings to API request:', modelSettings);
654
+
655
+ payload = {
656
+ model: currentLoadedModel,
657
+ messages: messages,
658
+ stream: true,
659
+ ...modelSettings // Apply current model settings
660
+ };
661
+
662
+ const resp = await httpRequest(getServerBaseUrl() + '/api/v1/chat/completions', {
663
+ method: 'POST',
664
+ headers: { 'Content-Type': 'application/json' },
665
+ body: JSON.stringify(payload)
666
+ });
667
+ if (!resp.body) throw new Error('No stream');
668
+ const reader = resp.body.getReader();
669
+ let decoder = new TextDecoder();
670
+ llmBubble.textContent = '';
671
+ while (true) {
672
+ const { done, value } = await reader.read();
673
+ if (done) break;
674
+ const chunk = decoder.decode(value, { stream: true }); // stream mode so multi-byte characters split across chunks decode correctly
675
+ if (chunk.trim() === 'data: [DONE]' || chunk.trim() === '[DONE]') continue;
676
+
677
+ // Handle Server-Sent Events format
678
+ const lines = chunk.split('\n');
679
+ for (const line of lines) {
680
+ if (line.startsWith('data: ')) {
681
+ const jsonStr = line.substring(6).trim();
682
+ if (jsonStr === '[DONE]') continue;
683
+
684
+ try {
685
+ const delta = JSON.parse(jsonStr);
686
+ if (delta.choices && delta.choices[0] && delta.choices[0].delta) {
687
+ const content = delta.choices[0].delta.content;
688
+ if (content) {
689
+ llmText += unescapeJsonString(content);
690
+ updateMessageContent(llmBubble, llmText, true);
691
+ chatHistory.scrollTop = chatHistory.scrollHeight;
692
+ }
693
+ }
694
+ } catch (parseErr) {
695
+ console.warn('Failed to parse JSON:', jsonStr, parseErr);
696
+ }
697
+ }
698
+ }
699
+ }
700
+ if (!llmText) throw new Error('No response');
701
+
702
+ // Split assistant response into content and reasoning_content so llama.cpp's Jinja does not need to parse <think> tags
703
+ function splitAssistantResponse(text) {
704
+ const THINK_OPEN = '<think>';
705
+ const THINK_CLOSE = '</think>';
706
+ const result = { content: text };
707
+ const start = text.indexOf(THINK_OPEN);
708
+ const end = text.indexOf(THINK_CLOSE);
709
+ if (start !== -1 && end !== -1 && end > start) {
710
+ const reasoning = text.substring(start + THINK_OPEN.length, end).trim();
711
+ const visible = (text.substring(0, start) + text.substring(end + THINK_CLOSE.length)).trim();
712
+ if (reasoning) result.reasoning_content = reasoning;
713
+ result.content = visible;
714
+ }
715
+ return result;
716
+ }
717
+
718
+ const assistantMsg = splitAssistantResponse(llmText);
719
+ messages.push({ role: 'assistant', ...assistantMsg });
720
+ } catch (e) {
721
+ let detail = e.message;
722
+ try {
723
+ const errPayload = { ...payload, stream: false };
724
+ const errResp = await httpJson(getServerBaseUrl() + '/api/v1/chat/completions', {
725
+ method: 'POST',
726
+ headers: { 'Content-Type': 'application/json' },
727
+ body: JSON.stringify(errPayload)
728
+ });
729
+ if (errResp && errResp.detail) detail = errResp.detail;
730
+ } catch (_) {}
731
+ llmBubble.textContent = '[Error: ' + detail + ']';
732
+ showErrorBanner(`Chat error: ${detail}`);
733
+ }
734
+ sendBtn.disabled = false;
735
+ }
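
For context, the chat logic above streams tokens from the Lemonade server's OpenAI-compatible /api/v1/chat/completions endpoint and parses the Server-Sent Events stream by hand. Below is a minimal sketch of a standalone client for the same endpoint; the base URL is an assumption (the UI resolves it via getServerBaseUrl()), and the model name is simply the DEFAULT_MODEL used above.

// Minimal sketch of a standalone client for the streaming endpoint used above.
// Assumes http://localhost:8000 as the base URL; the real UI resolves it with getServerBaseUrl().
async function streamChatCompletion(prompt) {
  const resp = await fetch('http://localhost:8000/api/v1/chat/completions', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      model: 'Qwen2.5-0.5B-Instruct-CPU', // DEFAULT_MODEL in the code above
      messages: [{ role: 'user', content: prompt }],
      stream: true,
    }),
  });
  if (!resp.ok || !resp.body) throw new Error(`Request failed: ${resp.status}`);

  const reader = resp.body.getReader();
  const decoder = new TextDecoder();
  let text = '';
  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    // Server-Sent Events: each event line is "data: {...}" or "data: [DONE]".
    for (const line of decoder.decode(value, { stream: true }).split('\n')) {
      if (!line.startsWith('data: ')) continue;
      const payload = line.slice(6).trim();
      if (payload === '[DONE]') continue;
      try {
        const delta = JSON.parse(payload).choices?.[0]?.delta?.content;
        if (delta) text += delta;
      } catch {
        // A JSON object split across network chunks is ignored in this simplified sketch.
      }
    }
  }
  return text;
}

Vision-capable models accept the same request with a content array mixing { type: 'text', ... } and { type: 'image_url', image_url: { url: 'data:image/png;base64,...' } } entries, which is the shape sendMessage() builds above.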