chat-console 0.1.95.dev1__tar.gz → 0.1.991.dev1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {chat_console-0.1.95.dev1 → chat_console-0.1.991.dev1}/PKG-INFO +1 -1
- {chat_console-0.1.95.dev1 → chat_console-0.1.991.dev1}/app/config.py +2 -1
- {chat_console-0.1.95.dev1 → chat_console-0.1.991.dev1}/app/main.py +221 -23
- {chat_console-0.1.95.dev1 → chat_console-0.1.991.dev1}/app/ui/chat_interface.py +16 -13
- chat_console-0.1.991.dev1/app/utils.py +194 -0
- {chat_console-0.1.95.dev1 → chat_console-0.1.991.dev1}/chat_console.egg-info/PKG-INFO +1 -1
- {chat_console-0.1.95.dev1 → chat_console-0.1.991.dev1}/setup.py +1 -1
- chat_console-0.1.95.dev1/app/utils.py +0 -111
- {chat_console-0.1.95.dev1 → chat_console-0.1.991.dev1}/LICENSE +0 -0
- {chat_console-0.1.95.dev1 → chat_console-0.1.991.dev1}/README.md +0 -0
- {chat_console-0.1.95.dev1 → chat_console-0.1.991.dev1}/app/__init__.py +0 -0
- {chat_console-0.1.95.dev1 → chat_console-0.1.991.dev1}/app/api/__init__.py +0 -0
- {chat_console-0.1.95.dev1 → chat_console-0.1.991.dev1}/app/api/anthropic.py +0 -0
- {chat_console-0.1.95.dev1 → chat_console-0.1.991.dev1}/app/api/base.py +0 -0
- {chat_console-0.1.95.dev1 → chat_console-0.1.991.dev1}/app/api/ollama.py +0 -0
- {chat_console-0.1.95.dev1 → chat_console-0.1.991.dev1}/app/api/openai.py +0 -0
- {chat_console-0.1.95.dev1 → chat_console-0.1.991.dev1}/app/database.py +0 -0
- {chat_console-0.1.95.dev1 → chat_console-0.1.991.dev1}/app/models.py +0 -0
- {chat_console-0.1.95.dev1 → chat_console-0.1.991.dev1}/app/ui/__init__.py +0 -0
- {chat_console-0.1.95.dev1 → chat_console-0.1.991.dev1}/app/ui/chat_list.py +0 -0
- {chat_console-0.1.95.dev1 → chat_console-0.1.991.dev1}/app/ui/model_selector.py +0 -0
- {chat_console-0.1.95.dev1 → chat_console-0.1.991.dev1}/app/ui/search.py +0 -0
- {chat_console-0.1.95.dev1 → chat_console-0.1.991.dev1}/app/ui/styles.py +0 -0
- {chat_console-0.1.95.dev1 → chat_console-0.1.991.dev1}/chat_console.egg-info/SOURCES.txt +0 -0
- {chat_console-0.1.95.dev1 → chat_console-0.1.991.dev1}/chat_console.egg-info/dependency_links.txt +0 -0
- {chat_console-0.1.95.dev1 → chat_console-0.1.991.dev1}/chat_console.egg-info/entry_points.txt +0 -0
- {chat_console-0.1.95.dev1 → chat_console-0.1.991.dev1}/chat_console.egg-info/requires.txt +0 -0
- {chat_console-0.1.95.dev1 → chat_console-0.1.991.dev1}/chat_console.egg-info/top_level.txt +0 -0
- {chat_console-0.1.95.dev1 → chat_console-0.1.991.dev1}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
|
|
1
1
|
Metadata-Version: 2.4
|
2
2
|
Name: chat-console
|
3
|
-
Version: 0.1.
|
3
|
+
Version: 0.1.991.dev1
|
4
4
|
Summary: A command-line interface for chatting with LLMs, storing chats and (future) rag interactions
|
5
5
|
Home-page: https://github.com/wazacraftrfid/chat-console
|
6
6
|
Author: Johnathan Greenaway
|
@@ -13,17 +13,18 @@ from textual.containers import Container, Horizontal, Vertical, ScrollableContai
|
|
13
13
|
from textual.reactive import reactive
|
14
14
|
from textual.widgets import Button, Input, Label, Static, Header, Footer, ListView, ListItem
|
15
15
|
from textual.binding import Binding
|
16
|
-
from textual import work
|
16
|
+
from textual import work, log, on
|
17
17
|
from textual.screen import Screen
|
18
18
|
from openai import OpenAI
|
19
19
|
from app.models import Message, Conversation
|
20
20
|
from app.database import ChatDatabase
|
21
21
|
from app.config import CONFIG, OPENAI_API_KEY, ANTHROPIC_API_KEY, OLLAMA_BASE_URL
|
22
|
-
|
22
|
+
# Import InputWithFocus as well
|
23
|
+
from app.ui.chat_interface import MessageDisplay, InputWithFocus
|
23
24
|
from app.ui.model_selector import ModelSelector, StyleSelector
|
24
25
|
from app.ui.chat_list import ChatList
|
25
26
|
from app.api.base import BaseModelClient
|
26
|
-
from app.utils import generate_streaming_response, save_settings_to_config # Import
|
27
|
+
from app.utils import generate_streaming_response, save_settings_to_config, generate_conversation_title # Import title function
|
27
28
|
|
28
29
|
# --- Remove SettingsScreen class entirely ---
|
29
30
|
|
@@ -114,10 +115,15 @@ class HistoryScreen(Screen):
|
|
114
115
|
class SimpleChatApp(App): # Keep SimpleChatApp class definition
|
115
116
|
"""Simplified Chat CLI application.""" # Keep SimpleChatApp docstring
|
116
117
|
|
117
|
-
TITLE = "Chat
|
118
|
+
TITLE = "Chat Console"
|
118
119
|
SUB_TITLE = "AI Chat Interface" # Keep SimpleChatApp SUB_TITLE
|
119
120
|
DARK = True # Keep SimpleChatApp DARK
|
120
121
|
|
122
|
+
# Ensure the log directory exists in a standard cache location
|
123
|
+
log_dir = os.path.expanduser("~/.cache/chat-cli")
|
124
|
+
os.makedirs(log_dir, exist_ok=True)
|
125
|
+
LOG_FILE = os.path.join(log_dir, "textual.log") # Use absolute path
|
126
|
+
|
121
127
|
CSS = """ # Keep SimpleChatApp CSS start
|
122
128
|
#main-content { # Keep SimpleChatApp CSS
|
123
129
|
width: 100%;
|
@@ -209,18 +215,48 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
|
|
209
215
|
padding-top: 1;
|
210
216
|
}
|
211
217
|
|
218
|
+
/* --- Title Input Modal CSS --- */
|
219
|
+
TitleInputModal {
|
220
|
+
align: center middle;
|
221
|
+
width: 60;
|
222
|
+
height: auto;
|
223
|
+
background: $surface;
|
224
|
+
border: thick $primary;
|
225
|
+
padding: 1 2;
|
226
|
+
layer: modal; /* Ensure it's above other elements */
|
227
|
+
}
|
228
|
+
|
229
|
+
#modal-label {
|
230
|
+
width: 100%;
|
231
|
+
content-align: center middle;
|
232
|
+
padding-bottom: 1;
|
233
|
+
}
|
234
|
+
|
235
|
+
#title-input {
|
236
|
+
width: 100%;
|
237
|
+
margin-bottom: 1;
|
238
|
+
}
|
239
|
+
|
240
|
+
TitleInputModal Horizontal {
|
241
|
+
width: 100%;
|
242
|
+
height: auto;
|
243
|
+
align: center middle;
|
244
|
+
}
|
212
245
|
"""
|
213
246
|
|
214
247
|
BINDINGS = [ # Keep SimpleChatApp BINDINGS, ensure Enter is not globally bound for settings
|
215
248
|
Binding("q", "quit", "Quit", show=True, key_display="q"),
|
249
|
+
# Removed priority=True - actions should only trigger when input is NOT focused
|
216
250
|
Binding("n", "action_new_conversation", "New Chat", show=True, key_display="n"),
|
217
|
-
Binding("c", "action_new_conversation", "New Chat", show=False, key_display="c"),
|
251
|
+
Binding("c", "action_new_conversation", "New Chat", show=False, key_display="c"), # Removed priority from alias
|
218
252
|
Binding("escape", "escape", "Cancel / Stop", show=True, key_display="esc"), # Escape might close settings panel too
|
219
253
|
Binding("ctrl+c", "quit", "Quit", show=False),
|
220
|
-
Binding("h", "view_history", "History", show=True, key_display="h"),
|
221
|
-
Binding("s", "settings", "Settings", show=True, key_display="s"),
|
254
|
+
Binding("h", "view_history", "History", show=True, key_display="h"), # Action method checks focus
|
255
|
+
Binding("s", "settings", "Settings", show=True, key_display="s"), # Action method checks focus
|
256
|
+
# Removed priority=True - action should only trigger when input is NOT focused
|
257
|
+
Binding("t", "action_update_title", "Update Title", show=True, key_display="t"),
|
222
258
|
] # Keep SimpleChatApp BINDINGS end
|
223
|
-
|
259
|
+
|
224
260
|
current_conversation = reactive(None) # Keep SimpleChatApp reactive var
|
225
261
|
is_generating = reactive(False) # Keep SimpleChatApp reactive var
|
226
262
|
|
@@ -231,7 +267,8 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
|
|
231
267
|
self.selected_model = CONFIG["default_model"] # Keep SimpleChatApp __init__
|
232
268
|
self.selected_style = CONFIG["default_style"] # Keep SimpleChatApp __init__
|
233
269
|
self.initial_text = initial_text # Keep SimpleChatApp __init__
|
234
|
-
|
270
|
+
# Removed self.input_widget instance variable
|
271
|
+
|
235
272
|
def compose(self) -> ComposeResult: # Modify SimpleChatApp compose
|
236
273
|
"""Create the simplified application layout."""
|
237
274
|
yield Header()
|
@@ -250,7 +287,8 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
|
|
250
287
|
|
251
288
|
# Input area
|
252
289
|
with Container(id="input-area"):
|
253
|
-
|
290
|
+
# Use the custom InputWithFocus widget
|
291
|
+
yield InputWithFocus(placeholder="Type your message here...", id="message-input")
|
254
292
|
# Removed Static widgets previously used for diagnosis
|
255
293
|
|
256
294
|
# --- Add Settings Panel (hidden initially) ---
|
@@ -307,10 +345,12 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
|
|
307
345
|
await self.action_send_message() # Keep SimpleChatApp on_mount
|
308
346
|
else: # Keep SimpleChatApp on_mount
|
309
347
|
# Focus the input if no initial text # Keep SimpleChatApp on_mount
|
348
|
+
# Removed assignment to self.input_widget
|
310
349
|
self.query_one("#message-input").focus() # Keep SimpleChatApp on_mount
|
311
|
-
|
350
|
+
|
312
351
|
async def create_new_conversation(self) -> None: # Keep SimpleChatApp create_new_conversation
|
313
352
|
"""Create a new chat conversation.""" # Keep SimpleChatApp create_new_conversation docstring
|
353
|
+
log("Entering create_new_conversation") # Added log
|
314
354
|
# Create new conversation in database using selected model and style # Keep SimpleChatApp create_new_conversation
|
315
355
|
model = self.selected_model # Keep SimpleChatApp create_new_conversation
|
316
356
|
style = self.selected_style # Keep SimpleChatApp create_new_conversation
|
@@ -319,7 +359,9 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
|
|
319
359
|
title = f"New conversation ({datetime.now().strftime('%Y-%m-%d %H:%M')})" # Keep SimpleChatApp create_new_conversation
|
320
360
|
|
321
361
|
# Create conversation in database using the correct method # Keep SimpleChatApp create_new_conversation
|
362
|
+
log(f"Creating conversation with title: {title}, model: {model}, style: {style}") # Added log
|
322
363
|
conversation_id = self.db.create_conversation(title, model, style) # Keep SimpleChatApp create_new_conversation
|
364
|
+
log(f"Database returned conversation_id: {conversation_id}") # Added log
|
323
365
|
|
324
366
|
# Get the full conversation data # Keep SimpleChatApp create_new_conversation
|
325
367
|
conversation_data = self.db.get_conversation(conversation_id) # Keep SimpleChatApp create_new_conversation
|
@@ -333,26 +375,42 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
|
|
333
375
|
|
334
376
|
# Clear messages and update UI # Keep SimpleChatApp create_new_conversation
|
335
377
|
self.messages = [] # Keep SimpleChatApp create_new_conversation
|
378
|
+
log("Finished updating messages UI in create_new_conversation") # Added log
|
336
379
|
await self.update_messages_ui() # Keep SimpleChatApp create_new_conversation
|
337
|
-
|
380
|
+
|
338
381
|
async def action_new_conversation(self) -> None: # Keep SimpleChatApp action_new_conversation
|
339
382
|
"""Handle the new conversation action.""" # Keep SimpleChatApp action_new_conversation docstring
|
340
|
-
|
383
|
+
log("--- ENTERING action_new_conversation ---") # Add entry log
|
341
384
|
|
385
|
+
# Check if the currently focused widget is the input widget
|
386
|
+
currently_focused = self.focused
|
387
|
+
if currently_focused and currently_focused.id == "message-input":
|
388
|
+
log("action_new_conversation skipped: input has focus")
|
389
|
+
return
|
390
|
+
|
391
|
+
log("action_new_conversation EXECUTING") # Add execution log
|
392
|
+
await self.create_new_conversation() # Keep SimpleChatApp action_new_conversation
|
393
|
+
log("action_new_conversation finished") # Added log
|
394
|
+
|
342
395
|
def action_escape(self) -> None: # Modify SimpleChatApp action_escape
|
343
396
|
"""Handle escape key globally."""
|
397
|
+
log("action_escape triggered") # Added log
|
344
398
|
settings_panel = self.query_one("#settings-panel")
|
399
|
+
log(f"Settings panel visible: {settings_panel.has_class('visible')}") # Added log
|
345
400
|
if settings_panel.has_class("visible"):
|
401
|
+
log("Hiding settings panel") # Added log
|
346
402
|
# If settings panel is visible, hide it
|
347
403
|
settings_panel.remove_class("visible")
|
348
404
|
self.query_one("#message-input").focus() # Focus input after closing settings
|
349
405
|
elif self.is_generating:
|
406
|
+
log("Stopping generation") # Added log
|
350
407
|
# Otherwise, stop generation if running
|
351
408
|
self.is_generating = False # Keep SimpleChatApp action_escape
|
352
409
|
self.notify("Generation stopped", severity="warning") # Keep SimpleChatApp action_escape
|
353
410
|
loading = self.query_one("#loading-indicator") # Keep SimpleChatApp action_escape
|
354
411
|
loading.add_class("hidden") # Keep SimpleChatApp action_escape
|
355
|
-
|
412
|
+
else: # Optional: Add other escape behavior for the main screen if desired # Keep SimpleChatApp action_escape comment
|
413
|
+
log("Escape pressed, but settings not visible and not generating.") # Added log
|
356
414
|
# pass # Keep SimpleChatApp action_escape comment
|
357
415
|
|
358
416
|
# Removed action_confirm_or_send - Enter is handled by Input submission # Keep SimpleChatApp comment
|
@@ -399,14 +457,62 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
|
|
399
457
|
content # Keep SimpleChatApp action_send_message
|
400
458
|
) # Keep SimpleChatApp action_send_message
|
401
459
|
|
402
|
-
#
|
403
|
-
|
460
|
+
# Check if this is the first message in the conversation
|
461
|
+
# Note: We check length *before* adding the potential assistant message
|
462
|
+
is_first_message = len(self.messages) == 1
|
463
|
+
|
464
|
+
# Update UI with user message first
|
465
|
+
await self.update_messages_ui()
|
404
466
|
|
405
|
-
#
|
406
|
-
|
467
|
+
# If this is the first message and dynamic titles are enabled, generate one
|
468
|
+
if is_first_message and self.current_conversation and CONFIG.get("generate_dynamic_titles", True):
|
469
|
+
log("First message detected, generating title...")
|
470
|
+
title_generation_in_progress = True # Use a local flag
|
471
|
+
loading = self.query_one("#loading-indicator")
|
472
|
+
loading.remove_class("hidden") # Show loading for title gen
|
473
|
+
|
474
|
+
try:
|
475
|
+
# Get appropriate client
|
476
|
+
model = self.selected_model
|
477
|
+
client = BaseModelClient.get_client_for_model(model)
|
478
|
+
if client is None:
|
479
|
+
raise Exception(f"No client available for model: {model}")
|
480
|
+
|
481
|
+
# Generate title
|
482
|
+
log(f"Calling generate_conversation_title with model: {model}")
|
483
|
+
title = await generate_conversation_title(content, model, client)
|
484
|
+
log(f"Generated title: {title}")
|
485
|
+
|
486
|
+
# Update conversation title in database
|
487
|
+
self.db.update_conversation(
|
488
|
+
self.current_conversation.id,
|
489
|
+
title=title
|
490
|
+
)
|
491
|
+
|
492
|
+
# Update UI title
|
493
|
+
title_widget = self.query_one("#conversation-title", Static)
|
494
|
+
title_widget.update(title)
|
495
|
+
|
496
|
+
# Update conversation object
|
497
|
+
self.current_conversation.title = title
|
498
|
+
|
499
|
+
self.notify(f"Conversation title set to: {title}", severity="information", timeout=3)
|
500
|
+
|
501
|
+
except Exception as e:
|
502
|
+
log.error(f"Failed to generate title: {str(e)}")
|
503
|
+
self.notify(f"Failed to generate title: {str(e)}", severity="warning")
|
504
|
+
finally:
|
505
|
+
title_generation_in_progress = False
|
506
|
+
# Hide loading indicator *only if* AI response generation isn't about to start
|
507
|
+
# This check might be redundant if generate_response always shows it anyway
|
508
|
+
if not self.is_generating:
|
509
|
+
loading.add_class("hidden")
|
510
|
+
|
511
|
+
# Generate AI response (will set self.is_generating and handle loading indicator)
|
512
|
+
await self.generate_response()
|
407
513
|
|
408
|
-
# Focus back on input
|
409
|
-
input_widget.focus()
|
514
|
+
# Focus back on input
|
515
|
+
input_widget.focus()
|
410
516
|
|
411
517
|
async def generate_response(self) -> None: # Keep SimpleChatApp generate_response
|
412
518
|
"""Generate an AI response.""" # Keep SimpleChatApp generate_response docstring
|
@@ -414,6 +520,7 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
|
|
414
520
|
return # Keep SimpleChatApp generate_response
|
415
521
|
|
416
522
|
self.is_generating = True # Keep SimpleChatApp generate_response
|
523
|
+
log(f"Setting is_generating to True") # Added log
|
417
524
|
loading = self.query_one("#loading-indicator") # Keep SimpleChatApp generate_response
|
418
525
|
loading.remove_class("hidden") # Keep SimpleChatApp generate_response
|
419
526
|
|
@@ -455,6 +562,7 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
|
|
455
562
|
|
456
563
|
async def update_ui(content: str): # Keep SimpleChatApp generate_response
|
457
564
|
if not self.is_generating: # Keep SimpleChatApp generate_response
|
565
|
+
log("update_ui called but is_generating is False, returning.") # Added log
|
458
566
|
return # Keep SimpleChatApp generate_response
|
459
567
|
|
460
568
|
async with update_lock: # Keep SimpleChatApp generate_response
|
@@ -474,7 +582,7 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
|
|
474
582
|
# Force another refresh to ensure content is visible # Keep SimpleChatApp generate_response
|
475
583
|
self.refresh(layout=True) # Keep SimpleChatApp generate_response
|
476
584
|
except Exception as e: # Keep SimpleChatApp generate_response
|
477
|
-
|
585
|
+
log.error(f"Error updating UI: {str(e)}") # Use log instead of logger
|
478
586
|
|
479
587
|
# Generate the response with timeout and cleanup # Keep SimpleChatApp generate_response
|
480
588
|
generation_task = None # Keep SimpleChatApp generate_response
|
@@ -482,6 +590,7 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
|
|
482
590
|
# Create a task for the response generation # Keep SimpleChatApp generate_response
|
483
591
|
generation_task = asyncio.create_task( # Keep SimpleChatApp generate_response
|
484
592
|
generate_streaming_response( # Keep SimpleChatApp generate_response
|
593
|
+
self, # Pass the app instance
|
485
594
|
api_messages, # Keep SimpleChatApp generate_response
|
486
595
|
model, # Keep SimpleChatApp generate_response
|
487
596
|
style, # Keep SimpleChatApp generate_response
|
@@ -495,6 +604,7 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
|
|
495
604
|
|
496
605
|
# Save to database only if we got a complete response # Keep SimpleChatApp generate_response
|
497
606
|
if self.is_generating and full_response: # Keep SimpleChatApp generate_response
|
607
|
+
log("Generation finished, saving full response to DB") # Added log
|
498
608
|
self.db.add_message( # Keep SimpleChatApp generate_response
|
499
609
|
self.current_conversation.id, # Keep SimpleChatApp generate_response
|
500
610
|
"assistant", # Keep SimpleChatApp generate_response
|
@@ -503,9 +613,11 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
|
|
503
613
|
# Force a final refresh # Keep SimpleChatApp generate_response
|
504
614
|
self.refresh(layout=True) # Keep SimpleChatApp generate_response
|
505
615
|
await asyncio.sleep(0.1) # Wait for UI to update # Keep SimpleChatApp generate_response
|
616
|
+
elif not full_response:
|
617
|
+
log("Generation finished but full_response is empty/None") # Added log
|
506
618
|
|
507
619
|
except asyncio.TimeoutError: # Keep SimpleChatApp generate_response
|
508
|
-
|
620
|
+
log.error("Response generation timed out") # Use log instead of logger
|
509
621
|
error_msg = "Response generation timed out. The model may be busy or unresponsive. Please try again." # Keep SimpleChatApp generate_response
|
510
622
|
self.notify(error_msg, severity="error") # Keep SimpleChatApp generate_response
|
511
623
|
|
@@ -520,22 +632,25 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
|
|
520
632
|
# Ensure task is properly cancelled and cleaned up # Keep SimpleChatApp generate_response
|
521
633
|
if generation_task: # Keep SimpleChatApp generate_response
|
522
634
|
if not generation_task.done(): # Keep SimpleChatApp generate_response
|
635
|
+
log("Cancelling generation task") # Added log
|
523
636
|
generation_task.cancel() # Keep SimpleChatApp generate_response
|
524
637
|
try: # Keep SimpleChatApp generate_response
|
525
638
|
await generation_task # Keep SimpleChatApp generate_response
|
526
639
|
except (asyncio.CancelledError, Exception) as e: # Keep SimpleChatApp generate_response
|
527
|
-
|
640
|
+
log.error(f"Error cleaning up generation task: {str(e)}") # Use log instead of logger
|
528
641
|
|
529
642
|
# Force a final UI refresh # Keep SimpleChatApp generate_response
|
530
643
|
self.refresh(layout=True) # Keep SimpleChatApp generate_response
|
531
644
|
|
532
645
|
except Exception as e: # Keep SimpleChatApp generate_response
|
646
|
+
log.error(f"Exception during generate_response: {str(e)}") # Added log
|
533
647
|
self.notify(f"Error generating response: {str(e)}", severity="error") # Keep SimpleChatApp generate_response
|
534
648
|
# Add error message # Keep SimpleChatApp generate_response
|
535
649
|
error_msg = f"Error generating response: {str(e)}" # Keep SimpleChatApp generate_response
|
536
650
|
self.messages.append(Message(role="assistant", content=error_msg)) # Keep SimpleChatApp generate_response
|
537
651
|
await self.update_messages_ui() # Keep SimpleChatApp generate_response
|
538
652
|
finally: # Keep SimpleChatApp generate_response
|
653
|
+
log(f"Setting is_generating to False in finally block") # Added log
|
539
654
|
self.is_generating = False # Keep SimpleChatApp generate_response
|
540
655
|
loading = self.query_one("#loading-indicator") # Keep SimpleChatApp generate_response
|
541
656
|
loading.add_class("hidden") # Keep SimpleChatApp generate_response
|
@@ -648,6 +763,89 @@ class SimpleChatApp(App): # Keep SimpleChatApp class definition
|
|
648
763
|
else:
|
649
764
|
input_widget.focus() # Focus input when closing
|
650
765
|
|
766
|
+
async def action_update_title(self) -> None:
|
767
|
+
"""Allow users to manually change the conversation title"""
|
768
|
+
log("--- ENTERING action_update_title ---") # Add entry log
|
769
|
+
|
770
|
+
# Check focus using self.focused instead of has_focus
|
771
|
+
currently_focused = self.focused
|
772
|
+
if currently_focused and currently_focused.id == "message-input":
|
773
|
+
log("action_update_title skipped: input has focus")
|
774
|
+
return
|
775
|
+
|
776
|
+
log("action_update_title EXECUTING") # Add execution log
|
777
|
+
|
778
|
+
if not self.current_conversation:
|
779
|
+
self.notify("No active conversation", severity="warning")
|
780
|
+
return
|
781
|
+
|
782
|
+
# --- Define the Modal Class ---
|
783
|
+
class TitleInputModal(Static):
|
784
|
+
def __init__(self, current_title: str):
|
785
|
+
super().__init__()
|
786
|
+
self.current_title = current_title
|
787
|
+
|
788
|
+
def compose(self) -> ComposeResult:
|
789
|
+
with Vertical(id="title-modal"):
|
790
|
+
yield Static("Enter new conversation title:", id="modal-label")
|
791
|
+
yield Input(value=self.current_title, id="title-input")
|
792
|
+
with Horizontal():
|
793
|
+
yield Button("Cancel", id="cancel-button", variant="error")
|
794
|
+
yield Button("Update", id="update-button", variant="success")
|
795
|
+
|
796
|
+
@on(Button.Pressed, "#update-button")
|
797
|
+
def update_title(self, event: Button.Pressed) -> None:
|
798
|
+
input_widget = self.query_one("#title-input", Input)
|
799
|
+
new_title = input_widget.value.strip()
|
800
|
+
if new_title:
|
801
|
+
# Call the app's update method asynchronously
|
802
|
+
asyncio.create_task(self.app.update_conversation_title(new_title))
|
803
|
+
self.remove() # Close the modal
|
804
|
+
|
805
|
+
@on(Button.Pressed, "#cancel-button")
|
806
|
+
def cancel(self, event: Button.Pressed) -> None:
|
807
|
+
self.remove() # Close the modal
|
808
|
+
|
809
|
+
def on_mount(self) -> None:
|
810
|
+
"""Focus the input when the modal appears."""
|
811
|
+
self.query_one("#title-input", Input).focus()
|
812
|
+
|
813
|
+
# --- Show the modal ---
|
814
|
+
modal = TitleInputModal(self.current_conversation.title)
|
815
|
+
await self.mount(modal) # Use await for mounting
|
816
|
+
|
817
|
+
async def update_conversation_title(self, new_title: str) -> None:
|
818
|
+
"""Update the current conversation title"""
|
819
|
+
if not self.current_conversation:
|
820
|
+
return
|
821
|
+
|
822
|
+
try:
|
823
|
+
# Update in database
|
824
|
+
self.db.update_conversation(
|
825
|
+
self.current_conversation.id,
|
826
|
+
title=new_title
|
827
|
+
)
|
828
|
+
|
829
|
+
# Update local object
|
830
|
+
self.current_conversation.title = new_title
|
831
|
+
|
832
|
+
# Update UI
|
833
|
+
title_widget = self.query_one("#conversation-title", Static)
|
834
|
+
title_widget.update(new_title)
|
835
|
+
|
836
|
+
# Update any chat list if visible
|
837
|
+
# Attempt to refresh ChatList if it exists
|
838
|
+
try:
|
839
|
+
chat_list = self.query_one(ChatList)
|
840
|
+
chat_list.refresh() # Call the refresh method
|
841
|
+
except Exception:
|
842
|
+
pass # Ignore if ChatList isn't found or refresh fails
|
843
|
+
|
844
|
+
self.notify("Title updated successfully", severity="information")
|
845
|
+
except Exception as e:
|
846
|
+
self.notify(f"Failed to update title: {str(e)}", severity="error")
|
847
|
+
|
848
|
+
|
651
849
|
def main(initial_text: Optional[str] = typer.Argument(None, help="Initial text to start the chat with")): # Keep main function
|
652
850
|
"""Entry point for the chat-cli application""" # Keep main function docstring
|
653
851
|
# When no argument is provided, typer passes the ArgumentInfo object # Keep main function
|
@@ -129,20 +129,23 @@ class MessageDisplay(RichLog):
|
|
129
129
|
|
130
130
|
class InputWithFocus(Input):
|
131
131
|
"""Enhanced Input that better handles focus and maintains cursor position"""
|
132
|
-
|
132
|
+
# Reverted on_key to default Input behavior for 'n' and 't'
|
133
|
+
# Let the standard Input handle key presses when focused.
|
134
|
+
# We will rely on focus checks within the App's action methods.
|
135
|
+
|
136
|
+
# Keep custom handling only for Enter submission if needed,
|
137
|
+
# but standard Input might already do this. Let's simplify
|
138
|
+
# and remove the custom on_key entirely for now unless
|
133
139
|
def on_key(self, event) -> None:
|
134
|
-
|
135
|
-
#
|
136
|
-
if event.
|
137
|
-
|
138
|
-
|
139
|
-
|
140
|
-
|
141
|
-
|
142
|
-
|
143
|
-
|
144
|
-
# Normal input handling for other keys
|
145
|
-
super().on_key(event)
|
140
|
+
# Let global hotkeys 'n' and 't' pass through even when input has focus
|
141
|
+
# by simply *not* stopping the event here.
|
142
|
+
if event.key == "n" or event.key == "t":
|
143
|
+
# Do nothing, allow the event to bubble up to the app level bindings.
|
144
|
+
return # Explicitly return to prevent further processing in this method
|
145
|
+
|
146
|
+
# For all other keys, the event continues to be processed by the Input
|
147
|
+
# widget's internal handlers (like _on_key shown in the traceback)
|
148
|
+
# because we didn't stop it in this method.
|
146
149
|
|
147
150
|
class ChatInterface(Container):
|
148
151
|
"""Main chat interface container"""
|
@@ -0,0 +1,194 @@
|
|
1
|
+
import os
|
2
|
+
import json
|
3
|
+
import time
|
4
|
+
import asyncio
|
5
|
+
import subprocess
|
6
|
+
import logging
|
7
|
+
from typing import Optional, Dict, Any, List, TYPE_CHECKING
|
8
|
+
from datetime import datetime
|
9
|
+
from .config import CONFIG, save_config
|
10
|
+
|
11
|
+
# Import SimpleChatApp for type hinting only if TYPE_CHECKING is True
|
12
|
+
if TYPE_CHECKING:
|
13
|
+
from .main import SimpleChatApp
|
14
|
+
|
15
|
+
# Set up logging
|
16
|
+
logging.basicConfig(level=logging.INFO)
|
17
|
+
logger = logging.getLogger(__name__)
|
18
|
+
|
19
|
+
async def generate_conversation_title(message: str, model: str, client: Any) -> str:
|
20
|
+
"""Generate a descriptive title for a conversation based on the first message"""
|
21
|
+
logger.info(f"Generating title for conversation using model: {model}")
|
22
|
+
|
23
|
+
# Create a special prompt for title generation
|
24
|
+
title_prompt = [
|
25
|
+
{
|
26
|
+
"role": "system",
|
27
|
+
"content": "Generate a brief, descriptive title (maximum 40 characters) for a conversation that starts with the following message. The title should be concise and reflect the main topic or query. Return only the title text with no additional explanation or formatting."
|
28
|
+
},
|
29
|
+
{
|
30
|
+
"role": "user",
|
31
|
+
"content": message
|
32
|
+
}
|
33
|
+
]
|
34
|
+
|
35
|
+
tries = 2 # Number of retries
|
36
|
+
last_error = None
|
37
|
+
|
38
|
+
while tries > 0:
|
39
|
+
try:
|
40
|
+
# Generate a title using the same model but with a separate request
|
41
|
+
# Assuming client has a method like generate_completion or similar
|
42
|
+
# Adjust the method call based on the actual client implementation
|
43
|
+
if hasattr(client, 'generate_completion'):
|
44
|
+
title = await client.generate_completion(
|
45
|
+
messages=title_prompt,
|
46
|
+
model=model,
|
47
|
+
temperature=0.7,
|
48
|
+
max_tokens=60 # Titles should be short
|
49
|
+
)
|
50
|
+
elif hasattr(client, 'generate_stream'): # Fallback or alternative method?
|
51
|
+
# If generate_completion isn't available, maybe adapt generate_stream?
|
52
|
+
# This part needs clarification based on the client's capabilities.
|
53
|
+
# For now, let's assume a hypothetical non-streaming call or adapt stream
|
54
|
+
# Simplified adaptation: collect stream chunks
|
55
|
+
title_chunks = []
|
56
|
+
async for chunk in client.generate_stream(title_prompt, model, style=""): # Assuming style might not apply or needs default
|
57
|
+
title_chunks.append(chunk)
|
58
|
+
title = "".join(title_chunks)
|
59
|
+
else:
|
60
|
+
raise NotImplementedError("Client does not support a suitable method for title generation.")
|
61
|
+
|
62
|
+
# Sanitize and limit the title
|
63
|
+
title = title.strip().strip('"\'').strip()
|
64
|
+
if len(title) > 40: # Set a maximum title length
|
65
|
+
title = title[:37] + "..."
|
66
|
+
|
67
|
+
logger.info(f"Generated title: {title}")
|
68
|
+
return title # Return successful title
|
69
|
+
|
70
|
+
except Exception as e:
|
71
|
+
last_error = str(e)
|
72
|
+
logger.error(f"Error generating title (tries left: {tries - 1}): {last_error}")
|
73
|
+
tries -= 1
|
74
|
+
if tries > 0: # Only sleep if there are more retries
|
75
|
+
await asyncio.sleep(1) # Small delay before retry
|
76
|
+
|
77
|
+
# If all retries fail, log the last error and return a default title
|
78
|
+
logger.error(f"Failed to generate title after multiple retries. Last error: {last_error}")
|
79
|
+
return f"Conversation ({datetime.now().strftime('%Y-%m-%d %H:%M')})"
|
80
|
+
|
81
|
+
# Modified signature to accept app instance
|
82
|
+
async def generate_streaming_response(app: 'SimpleChatApp', messages: List[Dict], model: str, style: str, client: Any, callback: Any) -> str:
    """Generate a streaming response from the model, updating the UI incrementally.

    Streams chunks from ``client.generate_stream`` and periodically flushes the
    accumulated text to ``callback`` so the UI stays responsive. Generation can
    be cancelled at any point by clearing ``app.is_generating``.

    Args:
        app: The running SimpleChatApp instance; its ``is_generating`` flag is
            polled as the cancellation signal.
        messages: Conversation history to send to the model.
        model: Identifier of the model to generate with.
        style: Response style hint passed through to the client.
        client: API client exposing ``generate_stream(messages, model, style)``.
        callback: Async callable invoked with the full response text so far.

    Returns:
        The full response text accumulated before completion or cancellation.
        Note: on cancellation between buffering and the UI update, the return
        value may include text that was never delivered to ``callback``.

    Raises:
        Exception: Re-raises any error from the underlying client after
            clearing ``app.is_generating`` so the app knows generation stopped.
    """
    logger.info(f"Starting streaming response with model: {model}")
    full_response = ""
    buffer = []
    buffer_len = 0  # Running byte count; avoids re-joining the buffer on every chunk
    last_update = time.time()
    update_interval = 0.1  # Update UI every 100ms

    try:
        async for chunk in client.generate_stream(messages, model, style):
            # Check if generation was cancelled by the app (e.g., via escape key)
            if not app.is_generating:
                logger.info("Generation cancelled by app flag.")
                break  # Exit the loop immediately

            if chunk:  # Only process non-empty chunks
                buffer.append(chunk)
                buffer_len += len(chunk)
                current_time = time.time()

                # Update UI if enough time has passed or buffer is large
                if current_time - last_update >= update_interval or buffer_len > 100:
                    new_content = ''.join(buffer)
                    full_response += new_content
                    # Check again before calling callback, in case it was cancelled during chunk processing
                    if not app.is_generating:
                        logger.info("Generation cancelled before UI update.")
                        break
                    await callback(full_response)
                    buffer = []
                    buffer_len = 0
                    last_update = current_time

                    # Small delay to let UI catch up
                    await asyncio.sleep(0.05)

        # Send any remaining content if generation wasn't cancelled
        if buffer and app.is_generating:
            full_response += ''.join(buffer)
            await callback(full_response)

        if app.is_generating:
            logger.info("Streaming response completed normally.")
        else:
            logger.info("Streaming response loop exited due to cancellation.")

        return full_response
    except Exception as e:
        logger.error(f"Error in streaming response: {str(e)}")
        # Ensure the app knows generation stopped on error
        app.is_generating = False
        raise
def ensure_ollama_running() -> bool:
    """Return True if the Ollama server is reachable, starting it if needed.

    First probes the local API endpoint; on connection failure, attempts to
    launch ``ollama serve`` as a background process and re-checks connectivity.
    """
    import requests
    try:
        logger.info("Checking if Ollama is running...")
        resp = requests.get("http://localhost:11434/api/tags", timeout=2)
        if resp.status_code == 200:
            logger.info("Ollama is running")
            return True
        logger.warning(f"Ollama returned status code: {resp.status_code}")
        return False
    except requests.exceptions.ConnectionError:
        logger.info("Ollama not running, attempting to start...")
        try:
            # Launch the Ollama server as a background child process
            proc = subprocess.Popen(
                ["ollama", "serve"],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                text=True,
            )

            # Give the server a moment to come up
            import time
            time.sleep(2)

            if proc.poll() is not None:
                # Process already exited -> startup failed; capture its output
                stdout, stderr = proc.communicate()
                logger.error(f"Ollama failed to start. stdout: {stdout}, stderr: {stderr}")
            else:
                logger.info("Ollama server started successfully")
                # Verify we can actually reach the API now
                try:
                    resp = requests.get("http://localhost:11434/api/tags", timeout=2)
                    if resp.status_code == 200:
                        logger.info("Successfully connected to Ollama")
                        return True
                    logger.error(f"Ollama returned status code: {resp.status_code}")
                except Exception as e:
                    logger.error(f"Failed to connect to Ollama after starting: {str(e)}")
        except FileNotFoundError:
            logger.error("Ollama command not found. Please ensure Ollama is installed.")
        except Exception as e:
            logger.error(f"Error starting Ollama: {str(e)}")
    except Exception as e:
        logger.error(f"Error checking Ollama status: {str(e)}")

    return False
def save_settings_to_config(model: str, style: str) -> None:
    """Persist the chosen default model and response style to the global config file."""
    logger.info(f"Saving settings to config - model: {model}, style: {style}")
    CONFIG.update({"default_model": model, "default_style": style})
    save_config(CONFIG)
|
@@ -1,6 +1,6 @@
|
|
1
1
|
Metadata-Version: 2.4
|
2
2
|
Name: chat-console
|
3
|
-
Version: 0.1.95.dev1
|
3
|
+
Version: 0.1.991.dev1
|
4
4
|
Summary: A command-line interface for chatting with LLMs, storing chats and (future) rag interactions
|
5
5
|
Home-page: https://github.com/wazacraftrfid/chat-console
|
6
6
|
Author: Johnathan Greenaway
|
@@ -14,7 +14,7 @@ with open(os.path.join("app", "__init__.py"), "r", encoding="utf-8") as f:
|
|
14
14
|
|
15
15
|
setup(
|
16
16
|
name="chat-console",
|
17
|
-
version="0.1.95.dev1",
|
17
|
+
version="0.1.991.dev1",
|
18
18
|
author="Johnathan Greenaway",
|
19
19
|
author_email="john@fimbriata.dev",
|
20
20
|
description="A command-line interface for chatting with LLMs, storing chats and (future) rag interactions",
|
@@ -1,111 +0,0 @@
|
|
1
|
-
import os
|
2
|
-
import json
|
3
|
-
import time
|
4
|
-
import asyncio
|
5
|
-
import subprocess
|
6
|
-
import logging
|
7
|
-
from typing import Optional, Dict, Any, List
|
8
|
-
from .config import CONFIG, save_config
|
9
|
-
|
10
|
-
# Set up logging
|
11
|
-
logging.basicConfig(level=logging.INFO)
|
12
|
-
logger = logging.getLogger(__name__)
|
13
|
-
|
14
|
-
async def generate_streaming_response(messages: List[Dict], model: str, style: str, client: Any, callback: Any) -> str:
|
15
|
-
"""Generate a streaming response from the model"""
|
16
|
-
logger.info(f"Starting streaming response with model: {model}")
|
17
|
-
full_response = ""
|
18
|
-
buffer = []
|
19
|
-
last_update = time.time()
|
20
|
-
update_interval = 0.1 # Update UI every 100ms
|
21
|
-
|
22
|
-
try:
|
23
|
-
async for chunk in client.generate_stream(messages, model, style):
|
24
|
-
if chunk: # Only process non-empty chunks
|
25
|
-
buffer.append(chunk)
|
26
|
-
current_time = time.time()
|
27
|
-
|
28
|
-
# Update UI if enough time has passed or buffer is large
|
29
|
-
if current_time - last_update >= update_interval or len(''.join(buffer)) > 100:
|
30
|
-
new_content = ''.join(buffer)
|
31
|
-
full_response += new_content
|
32
|
-
await callback(full_response)
|
33
|
-
buffer = []
|
34
|
-
last_update = current_time
|
35
|
-
|
36
|
-
# Small delay to let UI catch up
|
37
|
-
await asyncio.sleep(0.05)
|
38
|
-
|
39
|
-
# Send any remaining content
|
40
|
-
if buffer:
|
41
|
-
new_content = ''.join(buffer)
|
42
|
-
full_response += new_content
|
43
|
-
await callback(full_response)
|
44
|
-
|
45
|
-
logger.info("Streaming response completed")
|
46
|
-
return full_response
|
47
|
-
except Exception as e:
|
48
|
-
logger.error(f"Error in streaming response: {str(e)}")
|
49
|
-
raise
|
50
|
-
|
51
|
-
def ensure_ollama_running() -> bool:
|
52
|
-
"""
|
53
|
-
Check if Ollama is running and try to start it if not.
|
54
|
-
Returns True if Ollama is running after check/start attempt.
|
55
|
-
"""
|
56
|
-
import requests
|
57
|
-
try:
|
58
|
-
logger.info("Checking if Ollama is running...")
|
59
|
-
response = requests.get("http://localhost:11434/api/tags", timeout=2)
|
60
|
-
if response.status_code == 200:
|
61
|
-
logger.info("Ollama is running")
|
62
|
-
return True
|
63
|
-
else:
|
64
|
-
logger.warning(f"Ollama returned status code: {response.status_code}")
|
65
|
-
return False
|
66
|
-
except requests.exceptions.ConnectionError:
|
67
|
-
logger.info("Ollama not running, attempting to start...")
|
68
|
-
try:
|
69
|
-
# Try to start Ollama
|
70
|
-
process = subprocess.Popen(
|
71
|
-
["ollama", "serve"],
|
72
|
-
stdout=subprocess.PIPE,
|
73
|
-
stderr=subprocess.PIPE,
|
74
|
-
text=True
|
75
|
-
)
|
76
|
-
|
77
|
-
# Wait a moment for it to start
|
78
|
-
import time
|
79
|
-
time.sleep(2)
|
80
|
-
|
81
|
-
# Check if process is still running
|
82
|
-
if process.poll() is None:
|
83
|
-
logger.info("Ollama server started successfully")
|
84
|
-
# Check if we can connect
|
85
|
-
try:
|
86
|
-
response = requests.get("http://localhost:11434/api/tags", timeout=2)
|
87
|
-
if response.status_code == 200:
|
88
|
-
logger.info("Successfully connected to Ollama")
|
89
|
-
return True
|
90
|
-
else:
|
91
|
-
logger.error(f"Ollama returned status code: {response.status_code}")
|
92
|
-
except Exception as e:
|
93
|
-
logger.error(f"Failed to connect to Ollama after starting: {str(e)}")
|
94
|
-
else:
|
95
|
-
stdout, stderr = process.communicate()
|
96
|
-
logger.error(f"Ollama failed to start. stdout: {stdout}, stderr: {stderr}")
|
97
|
-
except FileNotFoundError:
|
98
|
-
logger.error("Ollama command not found. Please ensure Ollama is installed.")
|
99
|
-
except Exception as e:
|
100
|
-
logger.error(f"Error starting Ollama: {str(e)}")
|
101
|
-
except Exception as e:
|
102
|
-
logger.error(f"Error checking Ollama status: {str(e)}")
|
103
|
-
|
104
|
-
return False
|
105
|
-
|
106
|
-
def save_settings_to_config(model: str, style: str) -> None:
|
107
|
-
"""Save settings to global config file"""
|
108
|
-
logger.info(f"Saving settings to config - model: {model}, style: {style}")
|
109
|
-
CONFIG["default_model"] = model
|
110
|
-
CONFIG["default_style"] = style
|
111
|
-
save_config(CONFIG)
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
{chat_console-0.1.95.dev1 → chat_console-0.1.991.dev1}/chat_console.egg-info/dependency_links.txt
RENAMED
File without changes
|
{chat_console-0.1.95.dev1 → chat_console-0.1.991.dev1}/chat_console.egg-info/entry_points.txt
RENAMED
File without changes
|
File without changes
|
File without changes
|
File without changes
|