chat-console 0.1.5__py3-none-any.whl → 0.1.6.dev1__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
app/api/ollama.py CHANGED
```diff
@@ -161,6 +161,37 @@ class OllamaClient(BaseModelClient):
 
         while retries >= 0:
             try:
+                # First try a quick test request to check if model is loaded
+                async with aiohttp.ClientSession() as session:
+                    try:
+                        logger.info("Testing model availability...")
+                        async with session.post(
+                            f"{self.base_url}/api/generate",
+                            json={
+                                "model": model,
+                                "prompt": "test",
+                                "temperature": temperature,
+                                "stream": False
+                            },
+                            timeout=2
+                        ) as response:
+                            if response.status != 200:
+                                logger.warning(f"Model test request failed with status {response.status}")
+                                raise aiohttp.ClientError("Model not ready")
+                    except (aiohttp.ClientError, asyncio.TimeoutError) as e:
+                        logger.info(f"Model cold start detected: {str(e)}")
+                        # Model might need loading, try pulling it
+                        async with session.post(
+                            f"{self.base_url}/api/pull",
+                            json={"name": model},
+                            timeout=60
+                        ) as pull_response:
+                            if pull_response.status != 200:
+                                logger.error("Failed to pull model")
+                                raise Exception("Failed to pull model")
+                            logger.info("Model pulled successfully")
+
+                # Now proceed with actual generation
                 async with aiohttp.ClientSession() as session:
                     logger.debug(f"Sending streaming request to {self.base_url}/api/generate")
                     async with session.post(
@@ -171,7 +202,7 @@ class OllamaClient(BaseModelClient):
                             "temperature": temperature,
                             "stream": True
                         },
-                        timeout=30
+                        timeout=60  # Longer timeout for actual generation
                     ) as response:
                         response.raise_for_status()
                         async for line in response.content:
```
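Read in isolation, the new pre-flight logic does two things: probe the model with a tiny non-streaming request under an aggressive 2-second timeout, and on any failure fall back to `/api/pull` to (re)load the model. A minimal standalone sketch of that pattern, assuming a default local Ollama endpoint (`OLLAMA_URL` and `ensure_model_loaded` are illustrative names, not part of the package):

```python
import asyncio
import aiohttp

OLLAMA_URL = "http://localhost:11434"  # assumed default Ollama endpoint

async def ensure_model_loaded(model: str) -> None:
    """Probe the model with a tiny request; pull it if the probe fails."""
    async with aiohttp.ClientSession() as session:
        try:
            # Cheap non-streaming probe; a cold model won't answer in 2s
            async with session.post(
                f"{OLLAMA_URL}/api/generate",
                json={"model": model, "prompt": "test", "stream": False},
                timeout=aiohttp.ClientTimeout(total=2),
            ) as resp:
                if resp.status == 200:
                    return  # model is warm, nothing to do
        except (aiohttp.ClientError, asyncio.TimeoutError):
            pass  # fall through to the pull below
        # Probe failed: ask Ollama to pull/load the model
        async with session.post(
            f"{OLLAMA_URL}/api/pull",
            json={"name": model},
            timeout=aiohttp.ClientTimeout(total=60),
        ) as pull:
            pull.raise_for_status()
```

One caveat: a 2-second probe cannot distinguish a model that is merely loading from one that is absent, so the pull fallback doubles as the warm-up path, and the 60-second budget may still be tight for a first-time download of a large model.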
app/main.py CHANGED
```diff
@@ -520,43 +520,85 @@ class SimpleChatApp(App):
             # Add small delay to show thinking state
             await asyncio.sleep(0.5)
 
-            # Stream chunks to the UI
+            # Stream chunks to the UI with synchronization
+            update_lock = asyncio.Lock()
+
             async def update_ui(content: str):
                 if not self.is_generating:
                     return
 
-                try:
-                    # Clear thinking indicator on first content
-                    if assistant_message.content == "Thinking...":
-                        assistant_message.content = ""
-
-                    # Update message with full content so far
-                    assistant_message.content = content
-                    # Update UI with full content
-                    message_display.update_content(content)
-                    messages_container.scroll_end(animate=False)
-                    # Let the event loop process the update
-                    await asyncio.sleep(0)
-                except Exception as e:
-                    self.notify(f"Error updating UI: {str(e)}", severity="error")
+                async with update_lock:
+                    try:
+                        # Clear thinking indicator on first content
+                        if assistant_message.content == "Thinking...":
+                            assistant_message.content = ""
+
+                        # Update message with full content so far
+                        assistant_message.content = content
+                        # Update UI with full content
+                        await message_display.update_content(content)
+                        # Force a refresh and scroll
+                        self.refresh(layout=True)
+                        await asyncio.sleep(0.05)  # Longer delay for UI stability
+                        messages_container.scroll_end(animate=False)
+                        # Force another refresh to ensure content is visible
+                        self.refresh(layout=True)
+                    except Exception as e:
+                        logger.error(f"Error updating UI: {str(e)}")
 
-            # Generate the response
-            full_response = await generate_streaming_response(
-                api_messages,
-                model,
-                style,
-                client,
-                update_ui
-            )
-
-            # Save to database
-            if self.is_generating:  # Only save if not cancelled
-                self.db.add_message(
-                    self.current_conversation.id,
-                    "assistant",
-                    full_response
+            # Generate the response with timeout and cleanup
+            generation_task = None
+            try:
+                # Create a task for the response generation
+                generation_task = asyncio.create_task(
+                    generate_streaming_response(
+                        api_messages,
+                        model,
+                        style,
+                        client,
+                        update_ui
+                    )
                 )
 
+                # Wait for response with timeout
+                full_response = await asyncio.wait_for(generation_task, timeout=60)  # Longer timeout
+
+                # Save to database only if we got a complete response
+                if self.is_generating and full_response:
+                    self.db.add_message(
+                        self.current_conversation.id,
+                        "assistant",
+                        full_response
+                    )
+                    # Force a final refresh
+                    self.refresh(layout=True)
+                    await asyncio.sleep(0.1)  # Wait for UI to update
+
+            except asyncio.TimeoutError:
+                logger.error("Response generation timed out")
+                error_msg = "Response generation timed out. The model may be busy or unresponsive. Please try again."
+                self.notify(error_msg, severity="error")
+
+                # Remove the incomplete message
+                if self.messages and self.messages[-1].role == "assistant":
+                    self.messages.pop()
+
+                # Update UI to remove the incomplete message
+                await self.update_messages_ui()
+
+            finally:
+                # Ensure task is properly cancelled and cleaned up
+                if generation_task:
+                    if not generation_task.done():
+                        generation_task.cancel()
+                    try:
+                        await generation_task
+                    except (asyncio.CancelledError, Exception) as e:
+                        logger.error(f"Error cleaning up generation task: {str(e)}")
+
+                # Force a final UI refresh
+                self.refresh(layout=True)
+
         except Exception as e:
             self.notify(f"Error generating response: {str(e)}", severity="error")
             # Add error message
```
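Two patterns carry this rewrite: a per-callback `asyncio.Lock` so overlapping `update_ui` calls cannot interleave their refresh and scroll steps, and a `wait_for` plus cancel-and-await wrapper around the generation task. Stripped of the UI details, the wrapper reduces to the sketch below (`slow_generation` is a stand-in for `generate_streaming_response`, not a real function in the package):

```python
import asyncio
from typing import Optional

async def slow_generation() -> str:
    await asyncio.sleep(120)  # stand-in for a model call that may hang
    return "done"

async def run_with_timeout() -> Optional[str]:
    task = asyncio.create_task(slow_generation())
    try:
        return await asyncio.wait_for(task, timeout=60)
    except asyncio.TimeoutError:
        print("generation timed out")
        return None
    finally:
        # wait_for already cancels the task on timeout; this block is
        # defensive, ensuring the cancellation has fully unwound
        if not task.done():
            task.cancel()
        try:
            await task
        except asyncio.CancelledError:
            pass

asyncio.run(run_with_timeout())
```

One detail in the diff that looks redundant but is not: on Python 3.8+, `asyncio.CancelledError` derives from `BaseException`, so `except (asyncio.CancelledError, Exception)` genuinely widens the catch rather than duplicating it.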
app/ui/chat_interface.py CHANGED
```diff
@@ -3,6 +3,7 @@ import time
 import asyncio
 from datetime import datetime
 import re
+import logging
 
 from textual.app import ComposeResult
 from textual.containers import Container, ScrollableContainer, Vertical
@@ -16,6 +17,9 @@ from ..models import Message, Conversation
 from ..api.base import BaseModelClient
 from ..config import CONFIG
 
+# Set up logging
+logger = logging.getLogger(__name__)
+
 class MessageDisplay(RichLog):
     """Widget to display a single message"""
 
@@ -75,13 +79,15 @@ class MessageDisplay(RichLog):
         # Initial content
         self.write(self._format_content(self.message.content))
 
-    def update_content(self, content: str) -> None:
+    async def update_content(self, content: str) -> None:
         """Update the message content"""
         self.message.content = content
         self.clear()
         self.write(self._format_content(content))
         # Force a refresh after writing
         self.refresh(layout=True)
+        # Wait a moment for the layout to update
+        await asyncio.sleep(0.05)
 
     def _format_content(self, content: str) -> str:
         """Format message content with timestamp"""
```
```diff
@@ -252,13 +258,17 @@ class ChatInterface(Container):
         )
         messages_container.mount(self.current_message_display)
 
+        # Force a layout refresh and wait for it to complete
+        self.refresh(layout=True)
+        await asyncio.sleep(0.1)
+
         # Save to conversation if exists
         if self.conversation and self.conversation.id:
             from ..database import ChatDatabase
             db = ChatDatabase()
             db.add_message(self.conversation.id, role, content)
 
-        self.scroll_to_bottom()
+        await self.scroll_to_bottom()
 
     async def send_message(self) -> None:
         """Send a message"""
@@ -318,10 +328,10 @@ class ChatInterface(Container):
         for message in self.messages:
             display = MessageDisplay(message, highlight_code=CONFIG["highlight_code"])
             messages_container.mount(display)
-            self.scroll_to_bottom()
-            await asyncio.sleep(0.01)  # Small delay to prevent UI freezing
+            await self.scroll_to_bottom()
+            await asyncio.sleep(0.05)  # Small delay to prevent UI freezing
 
-        self.scroll_to_bottom()
+        await self.scroll_to_bottom()
 
         # Re-focus the input field after changing conversation
         self.query_one("#message-input").focus()
@@ -333,19 +343,24 @@ class ChatInterface(Container):
             self.query_one("#message-input").focus()
 
             # Scroll to bottom to ensure the latest messages are visible
-            self.scroll_to_bottom()
-        except Exception:
-            # Ignore errors during resize handling
-            pass
+            asyncio.create_task(self.scroll_to_bottom())
+        except Exception as e:
+            logger.error(f"Error handling resize: {str(e)}")
 
-    def scroll_to_bottom(self) -> None:
+    async def scroll_to_bottom(self) -> None:
         """Scroll to the bottom of the messages container"""
         try:
             messages_container = self.query_one("#messages-container")
+            # Force a layout refresh
+            self.refresh(layout=True)
+            # Wait a moment for layout to update
+            await asyncio.sleep(0.1)
+            # Scroll to bottom
             messages_container.scroll_end(animate=False)
-        except Exception:
-            # Container might not be available yet or scroll_end might not work
-            pass
+            # Force another refresh
+            self.refresh(layout=True)
+        except Exception as e:
+            logger.error(f"Error scrolling to bottom: {str(e)}")
 
     def watch_is_loading(self, is_loading: bool) -> None:
         """Watch the is_loading property"""
```
app/utils.py CHANGED
```diff
@@ -1,5 +1,7 @@
 import os
 import json
+import time
+import asyncio
 import subprocess
 import logging
 from typing import Optional, Dict, Any, List
@@ -13,11 +15,33 @@ async def generate_streaming_response(messages: List[Dict], model: str, style: s
     """Generate a streaming response from the model"""
     logger.info(f"Starting streaming response with model: {model}")
     full_response = ""
+    buffer = []
+    last_update = time.time()
+    update_interval = 0.1  # Update UI every 100ms
+
     try:
         async for chunk in client.generate_stream(messages, model, style):
             if chunk:  # Only process non-empty chunks
-                full_response += chunk
-                await callback(full_response)  # Send full response so far
+                buffer.append(chunk)
+                current_time = time.time()
+
+                # Update UI if enough time has passed or buffer is large
+                if current_time - last_update >= update_interval or len(''.join(buffer)) > 100:
+                    new_content = ''.join(buffer)
+                    full_response += new_content
+                    await callback(full_response)
+                    buffer = []
+                    last_update = current_time
+
+                    # Small delay to let UI catch up
+                    await asyncio.sleep(0.05)
+
+        # Send any remaining content
+        if buffer:
+            new_content = ''.join(buffer)
+            full_response += new_content
+            await callback(full_response)
+
         logger.info("Streaming response completed")
         return full_response
     except Exception as e:
```
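The buffering change throttles UI callbacks to roughly one per 100 ms, or sooner once about 100 characters have accumulated, with a final flush for whatever the stream leaves behind. Isolated from the client plumbing, the accumulator looks like this (`fake_stream` is a stand-in for `client.generate_stream`):

```python
import asyncio
import time

async def fake_stream():
    for word in "the quick brown fox jumps over the lazy dog".split():
        yield word + " "
        await asyncio.sleep(0.02)

async def drain(callback, interval: float = 0.1, max_chars: int = 100) -> str:
    full, buffer, last = "", [], time.time()
    async for chunk in fake_stream():
        buffer.append(chunk)
        now = time.time()
        # Flush on a time threshold or a size threshold, whichever trips first
        if now - last >= interval or len("".join(buffer)) > max_chars:
            full += "".join(buffer)
            await callback(full)
            buffer, last = [], now
    if buffer:  # final flush for a partially filled buffer
        full += "".join(buffer)
        await callback(full)
    return full

async def show(text: str) -> None:
    print(f"UI now shows {len(text)} chars")

print(repr(asyncio.run(drain(show))))
```

Passing the full accumulated text to the callback, rather than the delta, makes each flush idempotent: a dropped or delayed update is simply overwritten by the next one.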
chat_console-0.1.5.dist-info/METADATA → chat_console-0.1.6.dev1.dist-info/METADATA CHANGED
```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: chat-console
-Version: 0.1.5
+Version: 0.1.6.dev1
 Summary: A command-line interface for chatting with LLMs, storing chats and (future) rag interactions
 Home-page: https://github.com/wazacraftrfid/chat-console
 Author: Johnathan Greenaway
```
chat_console-0.1.5.dist-info/RECORD → chat_console-0.1.6.dev1.dist-info/RECORD CHANGED
```diff
@@ -1,23 +1,23 @@
 app/__init__.py,sha256=u5X4kPcpqZ12ZLnhwwOCScNvftaknDTrb0DMXqR_iLc,130
 app/config.py,sha256=7C09kn2bmda9frTPfZ7f1JhagqHAZjGM5BYqZmhegYM,5190
 app/database.py,sha256=nt8CVuDpy6zw8mOYqDcfUmNw611t7Ln7pz22M0b6-MI,9967
-app/main.py,sha256=ZeLroiPrlGgXArL0Po545PB3SU6imkx2KATjld1hV6s,21996
+app/main.py,sha256=vVfCgdbfuHjXwSoPt0hfZ9Ee4tpat8MA2tNwXKj21wQ,24179
 app/models.py,sha256=4-y9Lytay2exWPFi0FDlVeRL3K2-I7E-jBqNzTfokqY,2644
-app/utils.py,sha256=tbMhutE3vg9seGstD5k8MyUhJo5XbJ17p64dl2wTqYY,3481
+app/utils.py,sha256=zK8aTPdadXomyG2Kgpi7WuC5XYwfShJj74bXWSLtyW0,4309
 app/api/__init__.py,sha256=A8UL84ldYlv8l7O-yKzraVFcfww86SgWfpl4p7R03-w,62
 app/api/anthropic.py,sha256=x5PmBXEKe_ow2NWk8XdqSPR0hLOdCc_ypY5QAySeA78,4234
 app/api/base.py,sha256=-6RSxSpqe-OMwkaq1wVWbu3pVkte-ZYy8rmdvt-Qh48,3953
-app/api/ollama.py,sha256=2Yqyc6d3lwShAx4j1A97y7iPZWLeMw-wumtnhvQzAxY,9869
+app/api/ollama.py,sha256=zFZ3g2sYncvMgcvx92jTCLkigIaDvTuhILcLiCrwisc,11640
 app/api/openai.py,sha256=1fYgFXXL6yj_7lQ893Yj28RYG4M8d6gt_q1gzhhjcig,3641
 app/ui/__init__.py,sha256=RndfbQ1Tv47qdSiuQzvWP96lPS547SDaGE-BgOtiP_w,55
-app/ui/chat_interface.py,sha256=wFmCiSvwqp8Jia3nkMUxrYAou7Hr3UAqGhTvZoClVL8,11548
+app/ui/chat_interface.py,sha256=lzBpFLTKhGHCKlJaTl5NIjUm15tly7ZQKmp74QzYdVk,12142
 app/ui/chat_list.py,sha256=WQTYVNSSXlx_gQal3YqILZZKL9UiTjmNMIDX2I9pAMM,11205
 app/ui/model_selector.py,sha256=xCuaohgYvebgP0Eel6-XzUn-7Y0SrJUArdTr-CDBZXc,12840
 app/ui/search.py,sha256=b-m14kG3ovqW1-i0qDQ8KnAqFJbi5b1FLM9dOnbTyIs,9763
 app/ui/styles.py,sha256=04AhPuLrOd2yenfRySFRestPeuTPeMLzhmMB67NdGvw,5615
-chat_console-0.1.5.dist-info/LICENSE,sha256=srHZ3fvcAuZY1LHxE7P6XWju2njRCHyK6h_ftEbzxSE,1057
-chat_console-0.1.5.dist-info/METADATA,sha256=zS5hPtT7INca9w40E2gSFn5fpkD2zz9v0x1JnnsU7HI,2899
-chat_console-0.1.5.dist-info/WHEEL,sha256=52BFRY2Up02UkjOa29eZOS2VxUrpPORXg1pkohGGUS8,91
-chat_console-0.1.5.dist-info/entry_points.txt,sha256=kkVdEc22U9PAi2AeruoKklfkng_a_aHAP6VRVwrAD7c,67
-chat_console-0.1.5.dist-info/top_level.txt,sha256=io9g7LCbfmTG1SFKgEOGXmCFB9uMP2H5lerm0HiHWQE,4
-chat_console-0.1.5.dist-info/RECORD,,
+chat_console-0.1.6.dev1.dist-info/LICENSE,sha256=srHZ3fvcAuZY1LHxE7P6XWju2njRCHyK6h_ftEbzxSE,1057
+chat_console-0.1.6.dev1.dist-info/METADATA,sha256=dDkAEFkFOyYQpOMV08KTMQwMQRI59kZt-nH7QP9HvUY,2904
+chat_console-0.1.6.dev1.dist-info/WHEEL,sha256=52BFRY2Up02UkjOa29eZOS2VxUrpPORXg1pkohGGUS8,91
+chat_console-0.1.6.dev1.dist-info/entry_points.txt,sha256=kkVdEc22U9PAi2AeruoKklfkng_a_aHAP6VRVwrAD7c,67
+chat_console-0.1.6.dev1.dist-info/top_level.txt,sha256=io9g7LCbfmTG1SFKgEOGXmCFB9uMP2H5lerm0HiHWQE,4
+chat_console-0.1.6.dev1.dist-info/RECORD,,
```
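For reference when reading the RECORD rows above: each entry is `path,sha256=<digest>,<size-in-bytes>`, where the digest is the urlsafe-base64 SHA-256 of the file with `=` padding stripped, per the wheel spec. A small sketch for recomputing one entry (the local path is illustrative):

```python
import base64
import hashlib

def record_hash(data: bytes) -> str:
    """Digest in wheel-RECORD form: urlsafe base64, padding stripped."""
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest())
    return "sha256=" + digest.rstrip(b"=").decode("ascii")

with open("app/utils.py", "rb") as f:  # hypothetical extracted file
    data = f.read()
print(f"app/utils.py,{record_hash(data)},{len(data)}")
```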